[yt-svn] commit/yt: 60 new changesets

commits-noreply at bitbucket.org
Tue Jan 17 08:12:46 PST 2017


60 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/85f2df763924/
Changeset:   85f2df763924
Branch:      yt
User:        jzuhone
Date:        2016-04-04 19:56:01+00:00
Summary:     First pass at an Athena++ frontend. At this stage it most definitely does not work.
Affected #:  9 files
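
For orientation, here is a minimal sketch of how a dataset might be loaded
through this frontend once it is registered; the filename is hypothetical and
is chosen only to match the "athdf5.xdmf" substring that _is_valid checks for:

    import yt

    # Hypothetical Athena++ output; _is_valid keys off the file name alone.
    ds = yt.load("blast.athdf5.xdmf")
    print(ds.domain_left_edge, ds.domain_right_edge)
    print(ds.field_list)      # e.g. [("athena++", "gas_density"), ...]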

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/api.py
--- /dev/null
+++ b/yt/frontends/athena++/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.athena++
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from .data_structures import \
+      AthenaPPGrid, \
+      AthenaPPHierarchy, \
+      AthenaPPDataset
+
+from .fields import \
+      AthenaPPFieldInfo
+
+from .io import \
+      IOHandlerAthenaPP
+
+from . import tests

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena++/data_structures.py
@@ -0,0 +1,295 @@
+"""
+Data structures for Athena.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import os
+import weakref
+import glob
+import lxml
+
+from yt.funcs import \
+    mylog, \
+    ensure_tuple
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.data_objects.static_output import \
+    Dataset
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.utilities.file_handler import \
+    HDF5FileHandler
+
+from .fields import AthenaPPFieldInfo
+
+geometry_map = {"3DRectMesh": "cartesian"}
+
+class AthenaPPGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, id, index, level):
+        AMRGridPatch.__init__(self, id, filename = index.index_filename,
+                              index = index)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.ds.refine_by
+        else:
+            LE, RE = self.index.grid_left_edge[id,:], \
+                     self.index.grid_right_edge[id,:]
+            self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
+        if self.ds.dimensionality < 2: self.dds[1] = 1.0
+        if self.ds.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "AthenaPPGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class AthenaPPHierarchy(GridIndex):
+
+    grid = AthenaPPGrid
+    _dataset_type='athena++'
+    _data_file = None
+
+    def __init__(self, ds, dataset_type='athena++'):
+        self.dataset = weakref.proxy(ds)
+        self.directory = os.path.dirname(self.dataset.filename)
+        self.dataset_type = dataset_type
+        # for now, the index file is the dataset!
+        self.index_filename = self.dataset.datafile
+        self._handle = ds._handle
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _detect_output_fields(self):
+        self._field_map = dict((("athena++", k), v) for k, v in self.dataset._field_map.items())
+        self.field_list = list(self._field_map.keys())
+
+    def _count_grids(self):
+        self.num_grids = self._handle.attrs["TotalMeshBlock"][0]
+
+    def _parse_index(self):
+        coord_fields = self.dataset._coord_fields
+        num_grids = self._handle.attrs["TotalMeshBlock"][0]
+
+        self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
+        dds = np.zeros((num_grids, 3), dtype='float64')
+
+        for i in range(num_grids):
+            x = self._handle["MeshBlock%d" % i][coord_fields[0]]
+            y = self._handle["MeshBlock%d" % i][coord_fields[1]]
+            z = self._handle["MeshBlock%d" % i][coord_fields[2]]
+            dds[i] = np.array([x[1]-x[0], y[1]-y[0], z[1]-z[0]], dtype='float64')
+            self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
+            self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
+        self.grid_right_edge = self.grid_left_edge + self.grid_dimensions*dds
+
+        new_dle = np.min(self.grid_left_edge, axis=0)
+        new_dre = np.max(self.grid_right_edge, axis=0)
+        self.dataset.domain_left_edge[:] = np.round(new_dle, decimals=12)[:]
+        self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
+        self.dataset.domain_width = \
+                (self.dataset.domain_right_edge -
+                 self.dataset.domain_left_edge)
+        self.dataset.domain_center = \
+                0.5*(self.dataset.domain_left_edge +
+                     self.dataset.domain_right_edge)
+
+        dx_root = (self.dataset.domain_right_edge-
+                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
+
+        # This next line assumes refine_by == 2!!
+        levels = np.round(np.log2(dds[:,0]/dx_root[0])).astype('int')
+
+        self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
+        self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
+
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in range(num_grids):
+            self.grids[i] = self.grid(i, self, levels[i])
+
+        if self.dataset.dimensionality <= 2:
+            self.grid_right_edge[:,2] = self.dataset.domain_right_edge[2]
+        if self.dataset.dimensionality == 1:
+            self.grid_right_edge[:,1:] = self.dataset.domain_right_edge[1:]
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+        self._reconstruct_parent_child()
+        self.max_level = self._handle.attrs["MaxLevel"][0]
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child.Parent.append(grid)
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
+class AthenaPPDataset(Dataset):
+    _index_class = AthenaPPHierarchy
+    _field_info_class = AthenaPPFieldInfo
+    _dataset_type = "athena++"
+
+    def __init__(self, filename, dataset_type='athena++',
+                 storage_filename=None, parameters=None,
+                 units_override=None, unit_system="cgs"):
+        self.fluid_types += ("athena++",)
+        if self._handle is not None: return
+        if parameters is None:
+            parameters = {}
+        self.specified_parameters = parameters
+        if units_override is None:
+            units_override = {}
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.filename = filename
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+        self.backup_filename = self.filename[:-4] + "_backup.gdf"
+        # Unfortunately we now have to mandate that the index gets
+        # instantiated so that we can make sure we have the correct left
+        # and right domain edges.
+        self.index
+
+    def _set_code_unit_attributes(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            # We set these to cgs for now, but they may be overridden later.
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
+
+    def set_code_units(self):
+        super(AthenaPPDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        vel_unit = getattr(self, "velocity_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+        self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        if vel_unit is None:
+            self.velocity_unit = self.length_unit/self.time_unit
+        self.unit_registry.modify("code_velocity", self.velocity_unit)
+
+    def _parse_parameter_file(self):
+        tree = lxml.etree.parse(self.parameter_filename)
+        root = tree.getroot()
+        grids = root.findall("./Domain/Grid")[0]
+        grid0 = grids[0]
+        geometry = grid0.find("Topology").attrib["TopologyType"]
+        coords = grid0.find("Geometry")
+        self._coord_fields = [coord.text.split("/")[-1] for coord in coords]
+        self._field_map = {}
+        for field in grid0.findall("Attribute"):
+            self._field_map[field.attrib["Name"]] = field[0].text.split("/")[-1]
+        self.datafile = coords[0].text.split(":")[0]
+        self._handle = HDF5FileHandler(self.datafile)
+
+        xmin = self._handle["MeshBlock0"][self._coord_fields[0]][0]
+        ymin = self._handle["MeshBlock0"][self._coord_fields[1]][0]
+        zmin = self._handle["MeshBlock0"][self._coord_fields[2]][0]
+        mylog.info("Temporarily setting a likely bogus domain_right_edge and "
+                   "domain_left_edge. This will be corrected later.")
+        self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
+        self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self._handle.attrs["RootGridSize"]
+
+        self.refine_by = 2
+        dimensionality = 3
+        if self.domain_dimensions[2] == 1 :
+            dimensionality = 2
+        if self.domain_dimensions[1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
+        self.current_time = self._handle.attrs["Time"][0]
+        self.unique_identifier = self.parameter_filename.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+        if 'periodicity' in self.specified_parameters:
+            self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])
+        else:
+            self.periodicity = (True,True,True,)
+        if 'gamma' in self.specified_parameters:
+            self.gamma = float(self.specified_parameters['gamma'])
+        else:
+            self.gamma = 5./3.
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        if "gamma" in self.specified_parameters:
+            self.parameters["Gamma"] = self.specified_parameters["gamma"]
+        else:
+            self.parameters["Gamma"] = 5./3.
+        self.geometry = self.geometry_map.get(geometry, "cartesian")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'athdf5.xdmf' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    @property
+    def _skip_cache(self):
+        return True
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
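
The parent/child links above are rebuilt purely from geometry: any block one
level finer whose bounding box overlaps a given block's box is recorded as its
child. A pure-NumPy sketch of the same idea (get_box_grids_level does this in
compiled code inside yt):

    import numpy as np

    def children_of(i, left_edges, right_edges, levels):
        # Indices of blocks one level finer whose boxes overlap block i's box.
        overlap = np.all((left_edges < right_edges[i]) &
                         (right_edges > left_edges[i]), axis=1)
        return np.where(overlap & (levels == levels[i] + 1))[0]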

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/definitions.py
--- /dev/null
+++ b/yt/frontends/athena++/definitions.py
@@ -0,0 +1,14 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/fields.py
--- /dev/null
+++ b/yt/frontends/athena++/fields.py
@@ -0,0 +1,49 @@
+"""
+Athena++-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.utilities.physical_constants import \
+    kboltz, mh
+
+pres_units = "code_mass/(code_length*code_time**2)"
+rho_units = "code_mass / code_length**3"
+vel_units = "code_length / code_time"
+
+class AthenaPPFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        ("gas_density", (rho_units, ["density"], None)),
+        ("gas_pressure", (pres_units, ["pressure"], None)),
+        ("gas_velocity_x1", (vel_units, ["velocity_x"], None)),
+        ("gas_velocity_x2", (vel_units, ["velocity_y"], None)),
+        ("gas_velocity_x3", (vel_units, ["velocity_z"], None)),
+    )
+
+    def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
+        unit_system = self.ds.unit_system
+        def _temperature(field, data):
+            if data.has_field_parameter("mu"):
+                mu = data.get_field_parameter("mu")
+            else:
+                mu = 0.6
+            return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz
+        self.add_field(("gas","temperature"), function=_temperature,
+                       units=unit_system["temperature"])
+
+        setup_magnetic_field_aliases(self, "athena++", ["bfield_x%d" % ax for ax in (1,2,3)])
+
+
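
The derived temperature field above is just the ideal-gas relation
T = mu*m_H*P/(rho*k_B), with mu defaulting to 0.6 when no "mu" field parameter
is set. A quick standalone check in cgs (the pressure and density values are
illustrative only):

    # Approximate cgs constants
    kboltz = 1.3807e-16   # erg/K
    mh = 1.6726e-24       # g

    mu = 0.6              # default mean molecular weight
    pressure = 1.0e-11    # erg/cm**3
    density = 1.0e-27     # g/cm**3
    T = mu*mh*pressure/(density*kboltz)
    print(T)              # ~7.3e7 K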

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/io.py
--- /dev/null
+++ b/yt/frontends/athena++/io.py
@@ -0,0 +1,96 @@
+"""
+Athena++-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from itertools import groupby
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.selection_routines import AlwaysSelector
+
+# http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
+def grid_sequences(grids):
+    g_iter = sorted(grids, key = lambda g: g.id)
+    for k, g in groupby(enumerate(g_iter), lambda i_x1:i_x1[0]-i_x1[1].id):
+        seq = list(v[1] for v in g)
+        yield seq
+
+class IOHandlerAthenaPP(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type = "athena++"
+
+    def __init__(self, ds):
+        super(IOHandlerAthenaPP, self).__init__(ds)
+        self._handle = ds._handle
+
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "flash" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            # Always use *native* 64-bit float.
+            rv[field] = np.empty(size, dtype="=f8")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            ind = 0
+            for chunk in chunks:
+                for gs in grid_sequences(chunk.objs):
+                    start = gs[0].id - gs[0]._id_offset
+                    end = gs[-1].id - gs[-1]._id_offset + 1
+                    data = ds[start:end,:,:,:].transpose()
+                    for i, g in enumerate(gs):
+                        ind += g.select(selector, data[...,i], rv[field], ind)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        f = self._handle
+        rv = {}
+        for g in chunk.objs:
+            rv[g.id] = {}
+        # Split into particles and non-particles
+        fluid_fields, particle_fields = [], []
+        for ftype, fname in fields:
+            if ftype in self.ds.particle_types:
+                particle_fields.append((ftype, fname))
+            else:
+                fluid_fields.append((ftype, fname))
+        if len(particle_fields) > 0:
+            selector = AlwaysSelector(self.ds)
+            rv.update(self._read_particle_selection(
+                [chunk], selector, particle_fields))
+        if len(fluid_fields) == 0: return rv
+        for field in fluid_fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            for gs in grid_sequences(chunk.objs):
+                start = gs[0].id - gs[0]._id_offset
+                end = gs[-1].id - gs[-1]._id_offset + 1
+                data = ds[start:end,:,:,:].transpose()
+                for i, g in enumerate(gs):
+                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
+        return rv
+
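
grid_sequences above groups grids whose ids are consecutive, so each run can be
read with a single HDF5 slice ds[start:end]. The groupby key (index minus id)
is constant within a consecutive run, which is the trick borrowed from the
Stack Overflow link; a small standalone demonstration:

    from itertools import groupby

    ids = [0, 1, 2, 5, 6, 9]
    runs = [[x for _, x in grp]
            for _, grp in groupby(enumerate(ids), lambda i_x: i_x[0] - i_x[1])]
    print(runs)   # [[0, 1, 2], [5, 6], [9]]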

diff -r 395a07dcd3a54e1840f9ddab9a4b3ed49f780c28 -r 85f2df763924dd7b23a292a4696a565c239972ed yt/frontends/athena++/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena++/tests/test_outputs.py
@@ -0,0 +1,100 @@
+"""
+Athena frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    assert_allclose_units
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.athena.api import AthenaDataset
+from yt.config import ytcfg
+from yt.convenience import load
+
+import yt.units as u
+
+_fields_cloud = ("scalar[0]", "density", "total_energy")
+
+cloud = "ShockCloud/id0/Cloud.0050.vtk"
+@requires_ds(cloud)
+def test_cloud():
+    ds = data_dir_load(cloud)
+    yield assert_equal, str(ds), "Cloud.0050"
+    for test in small_patch_amr(ds, _fields_cloud):
+        test_cloud.__name__ = test.description
+        yield test
+
+_fields_blast = ("temperature", "density", "velocity_magnitude")
+
+blast = "MHDBlast/id0/Blast.0100.vtk"
+@requires_ds(blast)
+def test_blast():
+    ds = data_dir_load(blast)
+    yield assert_equal, str(ds), "Blast.0100"
+    for test in small_patch_amr(ds, _fields_blast):
+        test_blast.__name__ = test.description
+        yield test
+
+uo_stripping = {"time_unit":3.086e14,
+                "length_unit":8.0236e22,
+                "mass_unit":9.999e-30*8.0236e22**3}
+
+_fields_stripping = ("temperature", "density", "specific_scalar[0]")
+
+stripping = "RamPressureStripping/id0/rps.0062.vtk"
+@requires_ds(stripping, big_data=True)
+def test_stripping():
+    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
+    yield assert_equal, str(ds), "rps.0062"
+    for test in small_patch_amr(ds, _fields_stripping):
+        test_stripping.__name__ = test.description
+        yield test
+
+sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
+
+uo_sloshing = {"length_unit": (1.0,"Mpc"),
+               "time_unit": (1.0,"Myr"),
+               "mass_unit": (1.0e14,"Msun")}
+
+@requires_file(sloshing)
+def test_nprocs():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds1 = load(sloshing, units_override=uo_sloshing)
+    sp1 = ds1.sphere("c", (100.,"kpc"))
+    prj1 = ds1.proj("density",0)
+    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
+    sp2 = ds2.sphere("c", (100.,"kpc"))
+    prj2 = ds1.proj("density",0)
+
+    ds3 = load(sloshing, parameters=uo_sloshing)
+    assert_equal(ds3.length_unit, u.Mpc)
+    assert_equal(ds3.time_unit, u.Myr)
+    assert_equal(ds3.mass_unit, 1e14*u.Msun)
+
+    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    for ax in "xyz":
+        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
+    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
+    yield assert_equal, prj1["density"], prj2["density"]
+
+    ytcfg["yt","skip_dataset_cache"] = "False"
+
+@requires_file(cloud)
+def test_AthenaDataset():
+    assert isinstance(data_dir_load(cloud), AthenaDataset)


https://bitbucket.org/yt_analysis/yt/commits/5e374efdff38/
Changeset:   5e374efdff38
Branch:      yt
User:        jzuhone
Date:        2016-04-04 20:12:24+00:00
Summary:     Renamed directory, bug fixes
Affected #:  19 files
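
Presumably the directory had to be renamed because "athena++" is not a valid
Python identifier, so the package can only be registered in yt/frontends/api.py
under an importable name; a quick illustration (Python 3):

    # "import yt.frontends.athena++" is a SyntaxError, while the renamed
    # package can appear in an import statement:
    #     import yt.frontends.athena_pp
    print("athena++".isidentifier(), "athena_pp".isidentifier())   # False True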

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -20,6 +20,7 @@
     'art',
     'artio',
     'athena',
+    'athena_pp',
     'boxlib',
     'chombo',
     'eagle',

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/api.py
--- a/yt/frontends/athena++/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.athena++
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from .data_structures import \
-      AthenaPPGrid, \
-      AthenaPPHierarchy, \
-      AthenaPPDataset
-
-from .fields import \
-      AthenaPPFieldInfo
-
-from .io import \
-      IOHandlerAthenaPP
-
-from . import tests

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/data_structures.py
--- a/yt/frontends/athena++/data_structures.py
+++ /dev/null
@@ -1,295 +0,0 @@
-"""
-Data structures for Athena.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import os
-import weakref
-import glob
-import lxml
-
-from yt.funcs import \
-    mylog, \
-    ensure_tuple
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
-from yt.geometry.grid_geometry_handler import \
-    GridIndex
-from yt.data_objects.static_output import \
-    Dataset
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
-from yt.geometry.geometry_handler import \
-    YTDataChunk
-from yt.utilities.file_handler import \
-    HDF5FileHandler
-
-from .fields import AthenaPPFieldInfo
-
-geometry_map = {"3DRectMesh": "cartesian"}
-
-class AthenaPPGrid(AMRGridPatch):
-    _id_offset = 0
-
-    def __init__(self, id, index, level):
-        AMRGridPatch.__init__(self, id, filename = index.index_filename,
-                              index = index)
-        self.Parent = []
-        self.Children = []
-        self.Level = level
-
-    def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.ds.refine_by
-        else:
-            LE, RE = self.index.grid_left_edge[id,:], \
-                     self.index.grid_right_edge[id,:]
-            self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
-        if self.ds.dimensionality < 2: self.dds[1] = 1.0
-        if self.ds.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    def __repr__(self):
-        return "AthenaPPGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-
-class AthenaPPHierarchy(GridIndex):
-
-    grid = AthenaPPGrid
-    _dataset_type='athena++'
-    _data_file = None
-
-    def __init__(self, ds, dataset_type='athena++'):
-        self.dataset = weakref.proxy(ds)
-        self.directory = os.path.dirname(self.dataset.filename)
-        self.dataset_type = dataset_type
-        # for now, the index file is the dataset!
-        self.index_filename = self.dataset.datafile
-        self._handle = ds._handle
-        GridIndex.__init__(self, ds, dataset_type)
-
-    def _detect_output_fields(self):
-        self._field_map = dict((("athena++", k), v) for k, v in self.dataset._field_map.items())
-        self.field_list = list(self._field_map.keys())
-
-    def _count_grids(self):
-        self.num_grids = self._handle.attrs["TotalMeshBlock"][0]
-
-    def _parse_index(self):
-        coord_fields = self.dataset._coord_fields
-        num_grids = self._handle.attrs["TotalMeshBlock"][0]
-
-        self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
-        self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
-        self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
-        dds = np.zeros((num_grids, 3), dtype='float64')
-
-        for i in range(num_grids):
-            x = self._handle["MeshBlock%d" % i][coord_fields[0]]
-            y = self._handle["MeshBlock%d" % i][coord_fields[1]]
-            z = self._handle["MeshBlock%d" % i][coord_fields[2]]
-            dds[i] = np.array([x[1]-x[0], y[1]-y[0], z[1]-z[0]], dtype='float64')
-            self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
-            self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
-        self.grid_right_edge = self.grid_left_edge + self.grid_dimensions*dds
-
-        new_dle = np.min(self.grid_left_edge, axis=0)
-        new_dre = np.max(self.grid_right_edge, axis=0)
-        self.dataset.domain_left_edge[:] = np.round(new_dle, decimals=12)[:]
-        self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
-        self.dataset.domain_width = \
-                (self.dataset.domain_right_edge -
-                 self.dataset.domain_left_edge)
-        self.dataset.domain_center = \
-                0.5*(self.dataset.domain_left_edge +
-                     self.dataset.domain_right_edge)
-
-        dx_root = (self.dataset.domain_right_edge-
-                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
-
-        # This next line assumes refine_by == 2!!
-        levels = np.round(np.log2(dds[:,0]/dx_root[0])).astype('int')
-
-        self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
-        self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
-
-        self.grids = np.empty(self.num_grids, dtype='object')
-        for i in range(num_grids):
-            self.grids[i] = self.grid(i, self, levels[i])
-
-        if self.dataset.dimensionality <= 2:
-            self.grid_right_edge[:,2] = self.dataset.domain_right_edge[2]
-        if self.dataset.dimensionality == 1:
-            self.grid_right_edge[:,1:] = self.dataset.domain_right_edge[1:]
-        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
-
-    def _populate_grid_objects(self):
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-        self._reconstruct_parent_child()
-        self.max_level = self._handle.attrs["MaxLevel"][0]
-
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child.Parent.append(grid)
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
-    def _chunk_io(self, dobj, cache = True, local_only = False):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for subset in gobjs:
-            yield YTDataChunk(dobj, "io", [subset],
-                              self._count_selection(dobj, [subset]),
-                              cache = cache)
-
-class AthenaPPDataset(Dataset):
-    _index_class = AthenaPPHierarchy
-    _field_info_class = AthenaPPFieldInfo
-    _dataset_type = "athena++"
-
-    def __init__(self, filename, dataset_type='athena++',
-                 storage_filename=None, parameters=None,
-                 units_override=None, unit_system="cgs"):
-        self.fluid_types += ("athena++",)
-        if self._handle is not None: return
-        if parameters is None:
-            parameters = {}
-        self.specified_parameters = parameters
-        if units_override is None:
-            units_override = {}
-        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
-                         unit_system=unit_system)
-        self.filename = filename
-        if storage_filename is None:
-            storage_filename = '%s.yt' % filename.split('/')[-1]
-        self.storage_filename = storage_filename
-        self.backup_filename = self.filename[:-4] + "_backup.gdf"
-        # Unfortunately we now have to mandate that the index gets
-        # instantiated so that we can make sure we have the correct left
-        # and right domain edges.
-        self.index
-
-    def _set_code_unit_attributes(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        if "length_unit" not in self.units_override:
-            self.no_cgs_equiv_length = True
-        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may be overridden later.
-            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
-            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
-
-    def set_code_units(self):
-        super(AthenaPPDataset, self).set_code_units()
-        mag_unit = getattr(self, "magnetic_unit", None)
-        vel_unit = getattr(self, "velocity_unit", None)
-        if mag_unit is None:
-            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                         (self.time_unit**2 * self.length_unit))
-        self.magnetic_unit.convert_to_units("gauss")
-        self.unit_registry.modify("code_magnetic", self.magnetic_unit)
-        if vel_unit is None:
-            self.velocity_unit = self.length_unit/self.time_unit
-        self.unit_registry.modify("code_velocity", self.velocity_unit)
-
-    def _parse_parameter_file(self):
-        tree = lxml.etree.parse(self.parameter_filename)
-        root = tree.getroot()
-        grids = root.findall("./Domain/Grid")[0]
-        grid0 = grids[0]
-        geometry = grid0.find("Topology").attrib["TopologyType"]
-        coords = grid0.find("Geometry")
-        self._coord_fields = [coord.text.split("/")[-1] for coord in coords]
-        self._field_map = {}
-        for field in grid0.findall("Attribute"):
-            self._field_map[field.attrib["Name"]] = field[0].text.split("/")[-1]
-        self.datafile = coords[0].text.split(":")[0]
-        self._handle = HDF5FileHandler(self.datafile)
-
-        xmin = self._handle["MeshBlock0"][self._coord_fields[0]][0]
-        ymin = self._handle["MeshBlock0"][self._coord_fields[1]][0]
-        zmin = self._handle["MeshBlock0"][self._coord_fields[2]][0]
-        mylog.info("Temporarily setting a likely bogus domain_right_edge and "
-                   "domain_left_edge. This will be corrected later.")
-        self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
-        self.domain_right_edge = -self.domain_left_edge
-        self.domain_width = self.domain_right_edge-self.domain_left_edge
-        self.domain_dimensions = self._handle.attrs["RootGridSize"]
-
-        self.refine_by = 2
-        dimensionality = 3
-        if self.domain_dimensions[2] == 1 :
-            dimensionality = 2
-        if self.domain_dimensions[1] == 1 :
-            dimensionality = 1
-        self.dimensionality = dimensionality
-        self.current_time = self._handle.attrs["Time"][0]
-        self.unique_identifier = self.parameter_filename.__hash__()
-        self.cosmological_simulation = False
-        self.num_ghost_zones = 0
-        self.field_ordering = 'fortran'
-        self.boundary_conditions = [1]*6
-        if 'periodicity' in self.specified_parameters:
-            self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])
-        else:
-            self.periodicity = (True,True,True,)
-        if 'gamma' in self.specified_parameters:
-            self.gamma = float(self.specified_parameters['gamma'])
-        else:
-            self.gamma = 5./3.
-
-        self.current_redshift = self.omega_lambda = self.omega_matter = \
-            self.hubble_constant = self.cosmological_simulation = 0.0
-        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
-        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
-        if "gamma" in self.specified_parameters:
-            self.parameters["Gamma"] = self.specified_parameters["gamma"]
-        else:
-            self.parameters["Gamma"] = 5./3.
-        self.geometry = self.geometry_map.get(geometry, "cartesian")
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            if 'athdf5.xdmf' in args[0]:
-                return True
-        except:
-            pass
-        return False
-
-    @property
-    def _skip_cache(self):
-        return True
-
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/definitions.py
--- a/yt/frontends/athena++/definitions.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/fields.py
--- a/yt/frontends/athena++/fields.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-Athena++-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.fields.field_info_container import \
-    FieldInfoContainer
-from yt.utilities.physical_constants import \
-    kboltz, mh
-
-pres_units = "code_mass/(code_length*code_time**2)"
-rho_units = "code_mass / code_length**3"
-vel_units = "code_length / code_time"
-
-class AthenaPPFieldInfo(FieldInfoContainer):
-    known_other_fields = (
-        ("gas_density", (rho_units, ["density"], None)),
-        ("gas_pressure", (pres_units, ["pressure"], None)),
-        ("gas_velocity_x1", (vel_units, ["velocity_x"], None)),
-        ("gas_velocity_x2", (vel_units, ["velocity_y"], None)),
-        ("gas_velocity_x3", (vel_units, ["velocity_z"], None)),
-    )
-
-    def setup_fluid_fields(self):
-        from yt.fields.magnetic_field import \
-            setup_magnetic_field_aliases
-        unit_system = self.ds.unit_system
-        def _temperature(field, data):
-            if data.has_field_parameter("mu"):
-                mu = data.get_field_parameter("mu")
-            else:
-                mu = 0.6
-            return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz
-        self.add_field(("gas","temperature"), function=_temperature,
-                       units=unit_system["temperature"])
-
-        setup_magnetic_field_aliases(self, "athena++", ["bfield_x%d" % ax for ax in (1,2,3)])
-
-

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/io.py
--- a/yt/frontends/athena++/io.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Athena++-specific IO functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from itertools import groupby
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-from yt.utilities.logger import ytLogger as mylog
-from yt.geometry.selection_routines import AlwaysSelector
-
-# http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
-def grid_sequences(grids):
-    g_iter = sorted(grids, key = lambda g: g.id)
-    for k, g in groupby(enumerate(g_iter), lambda i_x1:i_x1[0]-i_x1[1].id):
-        seq = list(v[1] for v in g)
-        yield seq
-
-class IOHandlerAthenaPP(BaseIOHandler):
-    _particle_reader = False
-    _dataset_type = "athena++"
-
-    def __init__(self, ds):
-        super(IOHandlerAthenaPP, self).__init__(ds)
-        self._handle = ds._handle
-
-    def _read_particles(self, fields_to_read, type, args, grid_list,
-            count_list, conv_factors):
-        pass
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        chunks = list(chunks)
-        if any((ftype != "flash" for ftype, fname in fields)):
-            raise NotImplementedError
-        f = self._handle
-        rv = {}
-        for field in fields:
-            ftype, fname = field
-            # Always use *native* 64-bit float.
-            rv[field] = np.empty(size, dtype="=f8")
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s cells of %s fields in %s blocks",
-                    size, [f2 for f1, f2 in fields], ng)
-        for field in fields:
-            ftype, fname = field
-            ds = f["/%s" % fname]
-            ind = 0
-            for chunk in chunks:
-                for gs in grid_sequences(chunk.objs):
-                    start = gs[0].id - gs[0]._id_offset
-                    end = gs[-1].id - gs[-1]._id_offset + 1
-                    data = ds[start:end,:,:,:].transpose()
-                    for i, g in enumerate(gs):
-                        ind += g.select(selector, data[...,i], rv[field], ind)
-        return rv
-
-    def _read_chunk_data(self, chunk, fields):
-        f = self._handle
-        rv = {}
-        for g in chunk.objs:
-            rv[g.id] = {}
-        # Split into particles and non-particles
-        fluid_fields, particle_fields = [], []
-        for ftype, fname in fields:
-            if ftype in self.ds.particle_types:
-                particle_fields.append((ftype, fname))
-            else:
-                fluid_fields.append((ftype, fname))
-        if len(particle_fields) > 0:
-            selector = AlwaysSelector(self.ds)
-            rv.update(self._read_particle_selection(
-                [chunk], selector, particle_fields))
-        if len(fluid_fields) == 0: return rv
-        for field in fluid_fields:
-            ftype, fname = field
-            ds = f["/%s" % fname]
-            for gs in grid_sequences(chunk.objs):
-                start = gs[0].id - gs[0]._id_offset
-                end = gs[-1].id - gs[-1]._id_offset + 1
-                data = ds[start:end,:,:,:].transpose()
-                for i, g in enumerate(gs):
-                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
-        return rv
-

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena++/tests/test_outputs.py
--- a/yt/frontends/athena++/tests/test_outputs.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""
-Athena frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    assert_allclose_units
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.athena.api import AthenaDataset
-from yt.config import ytcfg
-from yt.convenience import load
-
-import yt.units as u
-
-_fields_cloud = ("scalar[0]", "density", "total_energy")
-
-cloud = "ShockCloud/id0/Cloud.0050.vtk"
-@requires_ds(cloud)
-def test_cloud():
-    ds = data_dir_load(cloud)
-    yield assert_equal, str(ds), "Cloud.0050"
-    for test in small_patch_amr(ds, _fields_cloud):
-        test_cloud.__name__ = test.description
-        yield test
-
-_fields_blast = ("temperature", "density", "velocity_magnitude")
-
-blast = "MHDBlast/id0/Blast.0100.vtk"
-@requires_ds(blast)
-def test_blast():
-    ds = data_dir_load(blast)
-    yield assert_equal, str(ds), "Blast.0100"
-    for test in small_patch_amr(ds, _fields_blast):
-        test_blast.__name__ = test.description
-        yield test
-
-uo_stripping = {"time_unit":3.086e14,
-                "length_unit":8.0236e22,
-                "mass_unit":9.999e-30*8.0236e22**3}
-
-_fields_stripping = ("temperature", "density", "specific_scalar[0]")
-
-stripping = "RamPressureStripping/id0/rps.0062.vtk"
-@requires_ds(stripping, big_data=True)
-def test_stripping():
-    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
-    yield assert_equal, str(ds), "rps.0062"
-    for test in small_patch_amr(ds, _fields_stripping):
-        test_stripping.__name__ = test.description
-        yield test
-
-sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
-
-uo_sloshing = {"length_unit": (1.0,"Mpc"),
-               "time_unit": (1.0,"Myr"),
-               "mass_unit": (1.0e14,"Msun")}
-
-@requires_file(sloshing)
-def test_nprocs():
-    ytcfg["yt","skip_dataset_cache"] = "True"
-
-    ds1 = load(sloshing, units_override=uo_sloshing)
-    sp1 = ds1.sphere("c", (100.,"kpc"))
-    prj1 = ds1.proj("density",0)
-    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
-    sp2 = ds2.sphere("c", (100.,"kpc"))
-    prj2 = ds1.proj("density",0)
-
-    ds3 = load(sloshing, parameters=uo_sloshing)
-    assert_equal(ds3.length_unit, u.Mpc)
-    assert_equal(ds3.time_unit, u.Myr)
-    assert_equal(ds3.mass_unit, 1e14*u.Msun)
-
-    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
-    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
-    for ax in "xyz":
-        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
-    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
-    yield assert_equal, prj1["density"], prj2["density"]
-
-    ytcfg["yt","skip_dataset_cache"] = "False"
-
-@requires_file(cloud)
-def test_AthenaDataset():
-    assert isinstance(data_dir_load(cloud), AthenaDataset)

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/api.py
--- /dev/null
+++ b/yt/frontends/athena_pp/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.athena++
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from .data_structures import \
+      AthenaPPGrid, \
+      AthenaPPHierarchy, \
+      AthenaPPDataset
+
+from .fields import \
+      AthenaPPFieldInfo
+
+from .io import \
+      IOHandlerAthenaPP
+
+from . import tests

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/data_structures.py
--- /dev/null
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -0,0 +1,293 @@
+"""
+Data structures for Athena.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import os
+import weakref
+
+from yt.funcs import \
+    mylog, \
+    ensure_tuple
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.data_objects.static_output import \
+    Dataset
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.utilities.file_handler import \
+    HDF5FileHandler
+
+from .fields import AthenaPPFieldInfo
+
+geometry_map = {"3DRectMesh": "cartesian"}
+
+class AthenaPPGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, id, index, level):
+        AMRGridPatch.__init__(self, id, filename = index.index_filename,
+                              index = index)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.ds.refine_by
+        else:
+            LE, RE = self.index.grid_left_edge[id,:], \
+                     self.index.grid_right_edge[id,:]
+            self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
+        if self.ds.dimensionality < 2: self.dds[1] = 1.0
+        if self.ds.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "AthenaPPGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class AthenaPPHierarchy(GridIndex):
+
+    grid = AthenaPPGrid
+    _dataset_type='athena++'
+    _data_file = None
+
+    def __init__(self, ds, dataset_type='athena++'):
+        self.dataset = weakref.proxy(ds)
+        self.directory = os.path.dirname(self.dataset.filename)
+        self.dataset_type = dataset_type
+        # for now, the index file is the dataset!
+        self.index_filename = self.dataset.datafile
+        self._handle = ds._handle
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _detect_output_fields(self):
+        self._field_map = dict((("athena++", k), v) for k, v in self.dataset._field_map.items())
+        self.field_list = list(self._field_map.keys())
+
+    def _count_grids(self):
+        self.num_grids = self._handle.attrs["TotalMeshBlock"][0]
+
+    def _parse_index(self):
+        coord_fields = self.dataset._coord_fields
+        num_grids = self._handle.attrs["TotalMeshBlock"][0]
+
+        self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
+        self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
+        dds = np.zeros((num_grids, 3), dtype='float64')
+
+        for i in range(num_grids):
+            x = self._handle["MeshBlock%d" % i][coord_fields[0]]
+            y = self._handle["MeshBlock%d" % i][coord_fields[1]]
+            z = self._handle["MeshBlock%d" % i][coord_fields[2]]
+            dds[i] = np.array([x[1]-x[0], y[1]-y[0], z[1]-z[0]], dtype='float64')
+            self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
+            self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
+        self.grid_right_edge = self.grid_left_edge + self.grid_dimensions*dds
+
+        new_dle = np.min(self.grid_left_edge, axis=0)
+        new_dre = np.max(self.grid_right_edge, axis=0)
+        self.dataset.domain_left_edge[:] = np.round(new_dle, decimals=12)[:]
+        self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
+        self.dataset.domain_width = \
+                (self.dataset.domain_right_edge -
+                 self.dataset.domain_left_edge)
+        self.dataset.domain_center = \
+                0.5*(self.dataset.domain_left_edge +
+                     self.dataset.domain_right_edge)
+
+        dx_root = (self.dataset.domain_right_edge-
+                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
+
+        # This next line assumes refine_by == 2!!
+        levels = np.round(np.log2(dds[:,0]/dx_root[0])).astype('int')
+
+        self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
+        self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
+
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in range(num_grids):
+            self.grids[i] = self.grid(i, self, levels[i])
+
+        if self.dataset.dimensionality <= 2:
+            self.grid_right_edge[:,2] = self.dataset.domain_right_edge[2]
+        if self.dataset.dimensionality == 1:
+            self.grid_right_edge[:,1:] = self.dataset.domain_right_edge[1:]
+        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+        self._reconstruct_parent_child()
+        self.max_level = self._handle.attrs["MaxLevel"][0]
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child.Parent.append(grid)
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _chunk_io(self, dobj, cache = True, local_only = False):
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in gobjs:
+            yield YTDataChunk(dobj, "io", [subset],
+                              self._count_selection(dobj, [subset]),
+                              cache = cache)
+
+class AthenaPPDataset(Dataset):
+    _index_class = AthenaPPHierarchy
+    _field_info_class = AthenaPPFieldInfo
+    _dataset_type = "athena++"
+
+    def __init__(self, filename, dataset_type='athena++',
+                 storage_filename=None, parameters=None,
+                 units_override=None, unit_system="cgs"):
+        self.fluid_types += ("athena++",)
+        if parameters is None:
+            parameters = {}
+        self.specified_parameters = parameters
+        if units_override is None:
+            units_override = {}
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.filename = filename
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+        self.backup_filename = self.filename[:-4] + "_backup.gdf"
+        # Unfortunately we now have to mandate that the index gets
+        # instantiated so that we can make sure we have the correct left
+        # and right domain edges.
+        self.index
+
+    def _set_code_unit_attributes(self):
+        """
+        Generates the conversion to various physical units based on the parameter file
+        """
+        if "length_unit" not in self.units_override:
+            self.no_cgs_equiv_length = True
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            # We set these to cgs for now, but they may be overridden later.
+            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+            setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
+
+    def set_code_units(self):
+        super(AthenaPPDataset, self).set_code_units()
+        mag_unit = getattr(self, "magnetic_unit", None)
+        vel_unit = getattr(self, "velocity_unit", None)
+        if mag_unit is None:
+            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit.convert_to_units("gauss")
+        self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        if vel_unit is None:
+            self.velocity_unit = self.length_unit/self.time_unit
+        self.unit_registry.modify("code_velocity", self.velocity_unit)
+
+    def _parse_parameter_file(self):
+        from lxml import etree
+        tree = etree.parse(self.parameter_filename)
+        root = tree.getroot()
+        grids = root.findall("./Domain/Grid")[0]
+        grid0 = grids[0]
+        geometry = grid0.find("Topology").attrib["TopologyType"]
+        coords = grid0.find("Geometry")
+        self._coord_fields = [coord.text.split("/")[-1] for coord in coords]
+        self._field_map = {}
+        for field in grid0.findall("Attribute"):
+            self._field_map[field.attrib["Name"]] = field[0].text.split("/")[-1]
+        self.datafile = coords[0].text.split(":")[0]
+        self._handle = HDF5FileHandler(self.datafile)
+
+        xmin = self._handle["MeshBlock0"][self._coord_fields[0]][0]
+        ymin = self._handle["MeshBlock0"][self._coord_fields[1]][0]
+        zmin = self._handle["MeshBlock0"][self._coord_fields[2]][0]
+        mylog.info("Temporarily setting a likely bogus domain_right_edge and "
+                   "domain_left_edge. This will be corrected later.")
+        self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
+        self.domain_right_edge = -self.domain_left_edge
+        self.domain_width = self.domain_right_edge-self.domain_left_edge
+        self.domain_dimensions = self._handle.attrs["RootGridSize"]
+
+        self.refine_by = 2
+        dimensionality = 3
+        if self.domain_dimensions[2] == 1 :
+            dimensionality = 2
+        if self.domain_dimensions[1] == 1 :
+            dimensionality = 1
+        self.dimensionality = dimensionality
+        self.current_time = self._handle.attrs["Time"][0]
+        self.unique_identifier = self.parameter_filename.__hash__()
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.field_ordering = 'fortran'
+        self.boundary_conditions = [1]*6
+        if 'periodicity' in self.specified_parameters:
+            self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])
+        else:
+            self.periodicity = (True,True,True,)
+        if 'gamma' in self.specified_parameters:
+            self.gamma = float(self.specified_parameters['gamma'])
+        else:
+            self.gamma = 5./3.
+
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        if "gamma" in self.specified_parameters:
+            self.parameters["Gamma"] = self.specified_parameters["gamma"]
+        else:
+            self.parameters["Gamma"] = 5./3.
+        self.geometry = geometry_map.get(geometry, "cartesian")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if 'athdf5.xdmf' in args[0]:
+                return True
+        except:
+            pass
+        return False
+
+    @property
+    def _skip_cache(self):
+        return True
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
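
For reference, the parameter-file parsing above leans on the XDMF sidecar that accompanies the
HDF5 data: each Attribute entry in the XDMF tree names a field and points at the HDF5 dataset
that stores it. A minimal sketch of that mapping step, assuming the same ./Domain/Grid/Attribute
layout the frontend expects (the filename is illustrative):

    from lxml import etree

    def read_field_map(xdmf_file="blast.athdf.xdmf"):  # illustrative filename
        root = etree.parse(xdmf_file).getroot()
        # The first Grid under Domain holds one sub-Grid per mesh block;
        # that sub-Grid's Attribute children map field names to HDF5 datasets.
        grid0 = root.findall("./Domain/Grid")[0][0]
        return {attr.attrib["Name"]: attr[0].text.split("/")[-1]
                for attr in grid0.findall("Attribute")}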

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/definitions.py
--- /dev/null
+++ b/yt/frontends/athena_pp/definitions.py
@@ -0,0 +1,14 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/fields.py
--- /dev/null
+++ b/yt/frontends/athena_pp/fields.py
@@ -0,0 +1,49 @@
+"""
+Athena++-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.utilities.physical_constants import \
+    kboltz, mh
+
+pres_units = "code_mass/(code_length*code_time**2)"
+rho_units = "code_mass / code_length**3"
+vel_units = "code_length / code_time"
+
+class AthenaPPFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        ("gas_density", (rho_units, ["density"], None)),
+        ("gas_pressure", (pres_units, ["pressure"], None)),
+        ("gas_velocity_x1", (vel_units, ["velocity_x"], None)),
+        ("gas_velocity_x2", (vel_units, ["velocity_y"], None)),
+        ("gas_velocity_x3", (vel_units, ["velocity_z"], None)),
+    )
+
+    def setup_fluid_fields(self):
+        from yt.fields.magnetic_field import \
+            setup_magnetic_field_aliases
+        unit_system = self.ds.unit_system
+        def _temperature(field, data):
+            if data.has_field_parameter("mu"):
+                mu = data.get_field_parameter("mu")
+            else:
+                mu = 0.6
+            return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz
+        self.add_field(("gas","temperature"), function=_temperature,
+                       units=unit_system["temperature"])
+
+        setup_magnetic_field_aliases(self, "athena++", ["bfield_x%d" % ax for ax in (1,2,3)])
+
+
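
The derived temperature field above computes mu*m_H*P/(rho*k_B) and falls back to mu = 0.6 when
no mean molecular weight is supplied. A short sketch of overriding that through a field
parameter, assuming an Athena++ dataset can be loaded (the filename is hypothetical):

    import yt

    ds = yt.load("blast.athdf.xdmf")    # hypothetical Athena++ output
    ad = ds.all_data()
    ad.set_field_parameter("mu", 1.0)   # e.g. pure ionized hydrogen
    print(ad["gas", "temperature"])     # uses mu = 1.0 instead of the 0.6 default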

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/io.py
--- /dev/null
+++ b/yt/frontends/athena_pp/io.py
@@ -0,0 +1,96 @@
+"""
+Athena++-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from itertools import groupby
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.selection_routines import AlwaysSelector
+
+# http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
+def grid_sequences(grids):
+    g_iter = sorted(grids, key = lambda g: g.id)
+    for k, g in groupby(enumerate(g_iter), lambda i_x1:i_x1[0]-i_x1[1].id):
+        seq = list(v[1] for v in g)
+        yield seq
+
+class IOHandlerAthenaPP(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type = "athena++"
+
+    def __init__(self, ds):
+        super(IOHandlerAthenaPP, self).__init__(ds)
+        self._handle = ds._handle
+
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "flash" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            # Always use *native* 64-bit float.
+            rv[field] = np.empty(size, dtype="=f8")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            ind = 0
+            for chunk in chunks:
+                for gs in grid_sequences(chunk.objs):
+                    start = gs[0].id - gs[0]._id_offset
+                    end = gs[-1].id - gs[-1]._id_offset + 1
+                    data = ds[start:end,:,:,:].transpose()
+                    for i, g in enumerate(gs):
+                        ind += g.select(selector, data[...,i], rv[field], ind)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        f = self._handle
+        rv = {}
+        for g in chunk.objs:
+            rv[g.id] = {}
+        # Split into particles and non-particles
+        fluid_fields, particle_fields = [], []
+        for ftype, fname in fields:
+            if ftype in self.ds.particle_types:
+                particle_fields.append((ftype, fname))
+            else:
+                fluid_fields.append((ftype, fname))
+        if len(particle_fields) > 0:
+            selector = AlwaysSelector(self.ds)
+            rv.update(self._read_particle_selection(
+                [chunk], selector, particle_fields))
+        if len(fluid_fields) == 0: return rv
+        for field in fluid_fields:
+            ftype, fname = field
+            ds = f["/%s" % fname]
+            for gs in grid_sequences(chunk.objs):
+                start = gs[0].id - gs[0]._id_offset
+                end = gs[-1].id - gs[-1]._id_offset + 1
+                data = ds[start:end,:,:,:].transpose()
+                for i, g in enumerate(gs):
+                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
+        return rv
+
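
The grid_sequences helper above groups grids with consecutive ids so that contiguous runs of
blocks can be read with a single slice. A standalone sketch of the same groupby trick on plain
objects:

    from itertools import groupby

    class FakeGrid(object):
        def __init__(self, id):
            self.id = id

    def grid_sequences(grids):
        g_iter = sorted(grids, key=lambda g: g.id)
        for k, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] - i_x1[1].id):
            yield [v[1] for v in g]

    grids = [FakeGrid(i) for i in (0, 1, 2, 5, 6, 9)]
    print([[g.id for g in seq] for seq in grid_sequences(grids)])
    # -> [[0, 1, 2], [5, 6], [9]]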

diff -r 85f2df763924dd7b23a292a4696a565c239972ed -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 yt/frontends/athena_pp/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena_pp/tests/test_outputs.py
@@ -0,0 +1,100 @@
+"""
+Athena frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    assert_allclose_units
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.athena.api import AthenaDataset
+from yt.config import ytcfg
+from yt.convenience import load
+
+import yt.units as u
+
+_fields_cloud = ("scalar[0]", "density", "total_energy")
+
+cloud = "ShockCloud/id0/Cloud.0050.vtk"
+@requires_ds(cloud)
+def test_cloud():
+    ds = data_dir_load(cloud)
+    yield assert_equal, str(ds), "Cloud.0050"
+    for test in small_patch_amr(ds, _fields_cloud):
+        test_cloud.__name__ = test.description
+        yield test
+
+_fields_blast = ("temperature", "density", "velocity_magnitude")
+
+blast = "MHDBlast/id0/Blast.0100.vtk"
+@requires_ds(blast)
+def test_blast():
+    ds = data_dir_load(blast)
+    yield assert_equal, str(ds), "Blast.0100"
+    for test in small_patch_amr(ds, _fields_blast):
+        test_blast.__name__ = test.description
+        yield test
+
+uo_stripping = {"time_unit":3.086e14,
+                "length_unit":8.0236e22,
+                "mass_unit":9.999e-30*8.0236e22**3}
+
+_fields_stripping = ("temperature", "density", "specific_scalar[0]")
+
+stripping = "RamPressureStripping/id0/rps.0062.vtk"
+@requires_ds(stripping, big_data=True)
+def test_stripping():
+    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
+    yield assert_equal, str(ds), "rps.0062"
+    for test in small_patch_amr(ds, _fields_stripping):
+        test_stripping.__name__ = test.description
+        yield test
+
+sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
+
+uo_sloshing = {"length_unit": (1.0,"Mpc"),
+               "time_unit": (1.0,"Myr"),
+               "mass_unit": (1.0e14,"Msun")}
+
+@requires_file(sloshing)
+def test_nprocs():
+    ytcfg["yt","skip_dataset_cache"] = "True"
+
+    ds1 = load(sloshing, units_override=uo_sloshing)
+    sp1 = ds1.sphere("c", (100.,"kpc"))
+    prj1 = ds1.proj("density",0)
+    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
+    sp2 = ds2.sphere("c", (100.,"kpc"))
+    prj2 = ds1.proj("density",0)
+
+    ds3 = load(sloshing, parameters=uo_sloshing)
+    assert_equal(ds3.length_unit, u.Mpc)
+    assert_equal(ds3.time_unit, u.Myr)
+    assert_equal(ds3.mass_unit, 1e14*u.Msun)
+
+    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
+    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    for ax in "xyz":
+        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
+    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
+    yield assert_equal, prj1["density"], prj2["density"]
+
+    ytcfg["yt","skip_dataset_cache"] = "False"
+
+@requires_file(cloud)
+def test_AthenaDataset():
+    assert isinstance(data_dir_load(cloud), AthenaDataset)


https://bitbucket.org/yt_analysis/yt/commits/e431a84c0ba1/
Changeset:   e431a84c0ba1
Branch:      yt
User:        jzuhone
Date:        2016-04-04 20:15:28+00:00
Summary:     wrong suffix
Affected #:  1 file

diff -r 5e374efdff38431dacf5f6c8696ef0427371c7f7 -r e431a84c0ba1bee214d37a1099bc093da06eaea2 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -279,7 +279,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            if 'athdf5.xdmf' in args[0]:
+            if 'athdf.xdmf' in args[0]:
                 return True
         except:
             pass


https://bitbucket.org/yt_analysis/yt/commits/f01f6a7cd8ae/
Changeset:   f01f6a7cd8ae
Branch:      yt
User:        jzuhone
Date:        2016-04-05 17:56:59+00:00
Summary:     Simple way to handle these
Affected #:  1 file

diff -r e431a84c0ba1bee214d37a1099bc093da06eaea2 -r f01f6a7cd8aed8f4bad038fef58ea4f0661e2113 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -93,16 +93,16 @@
         self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
         self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
         self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
-        dds = np.zeros((num_grids, 3), dtype='float64')
+        levels = np.zeros(num_grids, dtype='int32')
 
         for i in range(num_grids):
             x = self._handle["MeshBlock%d" % i][coord_fields[0]]
             y = self._handle["MeshBlock%d" % i][coord_fields[1]]
             z = self._handle["MeshBlock%d" % i][coord_fields[2]]
-            dds[i] = np.array([x[1]-x[0], y[1]-y[0], z[1]-z[0]], dtype='float64')
             self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
+            self.grid_right_edge[i] = np.array([x[-1], y[-1], z[-1]], dtype='float64')
             self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
-        self.grid_right_edge = self.grid_left_edge + self.grid_dimensions*dds
+            levels[i] = self._handle["MeshBlock%d" % i].attrs["Level"][0]
 
         new_dle = np.min(self.grid_left_edge, axis=0)
         new_dre = np.max(self.grid_right_edge, axis=0)
@@ -115,12 +115,6 @@
                 0.5*(self.dataset.domain_left_edge +
                      self.dataset.domain_right_edge)
 
-        dx_root = (self.dataset.domain_right_edge-
-                   self.dataset.domain_left_edge)/self.dataset.domain_dimensions
-
-        # This next line assumes refine_by == 2!!
-        levels = np.round(np.log2(dds[:,0]/dx_root[0])).astype('int')
-
         self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
         self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
 


https://bitbucket.org/yt_analysis/yt/commits/56a6eb2634e2/
Changeset:   56a6eb2634e2
Branch:      yt
User:        jzuhone
Date:        2016-04-11 18:48:47+00:00
Summary:     Correct I/O for Athena++
Affected #:  1 file

diff -r f01f6a7cd8aed8f4bad038fef58ea4f0661e2113 -r 56a6eb2634e25afbeb95e0f7cc426960ae5af4c8 yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -19,7 +19,6 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
-from yt.geometry.selection_routines import AlwaysSelector
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def grid_sequences(grids):
@@ -42,28 +41,24 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype != "flash" for ftype, fname in fields)):
+        if any((ftype != "athena++" for ftype, fname in fields)):
             raise NotImplementedError
         f = self._handle
         rv = {}
         for field in fields:
-            ftype, fname = field
             # Always use *native* 64-bit float.
             rv[field] = np.empty(size, dtype="=f8")
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
-            ftype, fname = field
-            ds = f["/%s" % fname]
+            f_fname = self.ds._field_map[field[1]]
             ind = 0
             for chunk in chunks:
                 for gs in grid_sequences(chunk.objs):
-                    start = gs[0].id - gs[0]._id_offset
-                    end = gs[-1].id - gs[-1]._id_offset + 1
-                    data = ds[start:end,:,:,:].transpose()
                     for i, g in enumerate(gs):
-                        ind += g.select(selector, data[...,i], rv[field], ind)
+                        data = f["MeshBlock%d" % g.id][f_fname][:,:,:].transpose()
+                        ind += g.select(selector, data, rv[field], ind)
         return rv
 
     def _read_chunk_data(self, chunk, fields):
@@ -71,26 +66,13 @@
         rv = {}
         for g in chunk.objs:
             rv[g.id] = {}
-        # Split into particles and non-particles
-        fluid_fields, particle_fields = [], []
-        for ftype, fname in fields:
-            if ftype in self.ds.particle_types:
-                particle_fields.append((ftype, fname))
-            else:
-                fluid_fields.append((ftype, fname))
-        if len(particle_fields) > 0:
-            selector = AlwaysSelector(self.ds)
-            rv.update(self._read_particle_selection(
-                [chunk], selector, particle_fields))
-        if len(fluid_fields) == 0: return rv
-        for field in fluid_fields:
-            ftype, fname = field
-            ds = f["/%s" % fname]
+        if len(fields) == 0:
+            return rv
+        for field in fields:
+            f_fname = self.ds._field_map[field[1]]
             for gs in grid_sequences(chunk.objs):
-                start = gs[0].id - gs[0]._id_offset
-                end = gs[-1].id - gs[-1]._id_offset + 1
-                data = ds[start:end,:,:,:].transpose()
                 for i, g in enumerate(gs):
-                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
+                    data = f["MeshBlock%d" % g.id][f_fname][:,:,:].transpose()
+                    rv[g.id][field] = np.asarray(data, "=f8")
         return rv
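
The I/O handler now looks up the on-disk dataset name through ds._field_map and reads one
MeshBlock group per grid rather than slicing a single global array. A reduced sketch of that
per-block read (group and dataset names follow the diff; the filename is illustrative):

    import h5py
    import numpy as np

    def read_block_field(filename, block_id, dataset_name):
        with h5py.File(filename, "r") as f:
            # Transpose so that x varies fastest, matching the ordering the
            # frontend hands to grid.select().
            data = f["MeshBlock%d" % block_id][dataset_name][:, :, :].transpose()
        return np.asarray(data, "=f8")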
 


https://bitbucket.org/yt_analysis/yt/commits/4d9da68b94e6/
Changeset:   4d9da68b94e6
Branch:      yt
User:        jzuhone
Date:        2016-04-11 18:52:55+00:00
Summary:     Remove this for now
Affected #:  1 file

diff -r 56a6eb2634e25afbeb95e0f7cc426960ae5af4c8 -r 4d9da68b94e6356a90045e5318fef23774a393ad yt/frontends/athena_pp/tests/test_outputs.py
--- a/yt/frontends/athena_pp/tests/test_outputs.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""
-Athena frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import \
-    assert_equal, \
-    requires_file, \
-    assert_allclose_units
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    small_patch_amr, \
-    data_dir_load
-from yt.frontends.athena.api import AthenaDataset
-from yt.config import ytcfg
-from yt.convenience import load
-
-import yt.units as u
-
-_fields_cloud = ("scalar[0]", "density", "total_energy")
-
-cloud = "ShockCloud/id0/Cloud.0050.vtk"
-@requires_ds(cloud)
-def test_cloud():
-    ds = data_dir_load(cloud)
-    yield assert_equal, str(ds), "Cloud.0050"
-    for test in small_patch_amr(ds, _fields_cloud):
-        test_cloud.__name__ = test.description
-        yield test
-
-_fields_blast = ("temperature", "density", "velocity_magnitude")
-
-blast = "MHDBlast/id0/Blast.0100.vtk"
-@requires_ds(blast)
-def test_blast():
-    ds = data_dir_load(blast)
-    yield assert_equal, str(ds), "Blast.0100"
-    for test in small_patch_amr(ds, _fields_blast):
-        test_blast.__name__ = test.description
-        yield test
-
-uo_stripping = {"time_unit":3.086e14,
-                "length_unit":8.0236e22,
-                "mass_unit":9.999e-30*8.0236e22**3}
-
-_fields_stripping = ("temperature", "density", "specific_scalar[0]")
-
-stripping = "RamPressureStripping/id0/rps.0062.vtk"
-@requires_ds(stripping, big_data=True)
-def test_stripping():
-    ds = data_dir_load(stripping, kwargs={"units_override":uo_stripping})
-    yield assert_equal, str(ds), "rps.0062"
-    for test in small_patch_amr(ds, _fields_stripping):
-        test_stripping.__name__ = test.description
-        yield test
-
-sloshing = "MHDSloshing/virgo_low_res.0054.vtk"
-
-uo_sloshing = {"length_unit": (1.0,"Mpc"),
-               "time_unit": (1.0,"Myr"),
-               "mass_unit": (1.0e14,"Msun")}
-
-@requires_file(sloshing)
-def test_nprocs():
-    ytcfg["yt","skip_dataset_cache"] = "True"
-
-    ds1 = load(sloshing, units_override=uo_sloshing)
-    sp1 = ds1.sphere("c", (100.,"kpc"))
-    prj1 = ds1.proj("density",0)
-    ds2 = load(sloshing, units_override=uo_sloshing, nprocs=8)
-    sp2 = ds2.sphere("c", (100.,"kpc"))
-    prj2 = ds1.proj("density",0)
-
-    ds3 = load(sloshing, parameters=uo_sloshing)
-    assert_equal(ds3.length_unit, u.Mpc)
-    assert_equal(ds3.time_unit, u.Myr)
-    assert_equal(ds3.mass_unit, 1e14*u.Msun)
-
-    yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
-    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
-    for ax in "xyz":
-        yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
-    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
-    yield assert_equal, prj1["density"], prj2["density"]
-
-    ytcfg["yt","skip_dataset_cache"] = "False"
-
-@requires_file(cloud)
-def test_AthenaDataset():
-    assert isinstance(data_dir_load(cloud), AthenaDataset)


https://bitbucket.org/yt_analysis/yt/commits/df2666549427/
Changeset:   df2666549427
Branch:      yt
User:        jzuhone
Date:        2016-04-11 19:02:13+00:00
Summary:     Add in known magnetic fields
Affected #:  1 file

diff -r 4d9da68b94e6356a90045e5318fef23774a393ad -r df26665494274c1aec0aa42fb3a275738b2f4e47 yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -18,6 +18,7 @@
 from yt.utilities.physical_constants import \
     kboltz, mh
 
+b_units = "code_magnetic"
 pres_units = "code_mass/(code_length*code_time**2)"
 rho_units = "code_mass / code_length**3"
 vel_units = "code_length / code_time"
@@ -29,6 +30,9 @@
         ("gas_velocity_x1", (vel_units, ["velocity_x"], None)),
         ("gas_velocity_x2", (vel_units, ["velocity_y"], None)),
         ("gas_velocity_x3", (vel_units, ["velocity_z"], None)),
+        ("bfield_x1", (b_units, [], None)),
+        ("bfield_x2", (b_units, [], None)),
+        ("bfield_x3", (b_units, [], None)),
     )
 
     def setup_fluid_fields(self):
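
With the bfield_x1/x2/x3 components registered in code_magnetic units, the existing
setup_magnetic_field_aliases call in setup_fluid_fields should expose them under the usual yt
names. A short, hedged usage sketch (the filename is hypothetical):

    import yt

    ds = yt.load("blast.athdf.xdmf")    # hypothetical Athena++ output
    ad = ds.all_data()
    # The aliased components carry the unit system's magnetic field units.
    print(ad["gas", "magnetic_field_x"].units)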


https://bitbucket.org/yt_analysis/yt/commits/f7ce756b14d3/
Changeset:   f7ce756b14d3
Branch:      yt
User:        jzuhone
Date:        2016-04-15 03:45:00+00:00
Summary:     Merge
Affected #:  100 files

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -62,6 +62,7 @@
 yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
+*.pyd
 .*.swp
 *.so
 .idea/*

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
@@ -12,4 +12,5 @@
 prune doc/source/reference/api/generated
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
 prune yt/frontends/_skeleton

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -19,7 +19,7 @@
 CWD = os.getcwd()
 ytcfg["yt", "serialize"] = "False"
 PARALLEL_TEST = {"rockstar_nest": "3"}
-BLACKLIST = []
+BLACKLIST = ["opengl_ipython", "opengl_vr"]
 
 
 def prep_dirs():

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -1,48 +1,91 @@
 .. _absorption_spectrum:
 
-Absorption Spectrum
-===================
+Creating Absorption Spectra
+===========================
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created
-by the (:ref:`light-ray-generator`).  For each element of the ray, column
-densities are calculated multiplying the number density within a grid cell
-with the path length of the ray through the cell.  Line profiles are
-generated using a voigt profile based on the temperature field.  The lines
-are then shifted according to the redshift recorded by the light ray tool
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to
-:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
+Absorption line spectra are spectra generated using bright background sources
+to illuminate tenuous foreground material and are primarily used in studies
+of the circumgalactic medium and intergalactic medium.  These spectra can
+be created using the
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+and
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+analysis modules.
 
-The spectrum generator will output a file containing the wavelength and
-normalized flux.  It will also output a text file listing all important lines.
+The 
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class
+and its workhorse method
+:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum`
+return two arrays, one with wavelengths, the other with the normalized
+flux values at each of the wavelength values.  It can also output a text file
+listing all important lines.
+
+For example, here is an absorption spectrum for the wavelength range from 900 
+to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4:
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms
-made with a light ray extending from z = 0 to z = 0.4.
+And a zoom-in on the 1425-1450 Angstrom window:
 
 .. image:: _images/spectrum_zoom.png
    :width: 500
 
-A zoom-in of the above spectrum.
+Method for Creating Absorption Spectra
+--------------------------------------
 
-Creating an Absorption Spectrum
--------------------------------
+Once a
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+has been created traversing a dataset using the :ref:`light-ray-generator`,
+a series of arrays store the various fields of the gas parcels (represented
+as cells) intersected along the ray.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+steps through each element of the
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s
+arrays and calculates the column density for the desired ion by multiplying its
+number density with the path length through the cell.  Using these column
+densities along with temperatures to calculate thermal broadening, Voigt
+profiles are deposited onto a featureless background spectrum.  By default,
+the peculiar velocity of the gas is included as a Doppler redshift in addition
+to any cosmological redshift of the data dump itself.
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the
-minimum and maximum wavelengths, and the number of wavelength bins.
+Subgrid Deposition
+^^^^^^^^^^^^^^^^^^
+
+For features that are not resolved (i.e., possessing a narrower width than the
+spectral resolution),
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+performs subgrid deposition.  The subgrid deposition algorithm creates a number
+of smaller virtual bins; by default, the width of each virtual bin is 1/10th
+the width of the spectral feature.  The Voigt profile is then deposited
+into these virtual bins, where it is resolved, and these virtual bins
+are numerically integrated back to the resolution of the original spectral bin
+size, yielding accurate equivalent width values.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+informs the user how many spectral features are deposited in this fashion.
+
+Tutorial on Creating an Absorption Spectrum
+-------------------------------------------
+
+Initializing `AbsorptionSpectrum` Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To instantiate an
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+object, the arguments required are the
+minimum and maximum wavelengths (assumed to be in Angstroms), and the number
+of wavelength bins to span this range (including the endpoints):
 
 .. code-block:: python
 
   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
-  sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
+  sp = AbsorptionSpectrum(900.0, 1800.0, 10001)
 
 Adding Features to the Spectrum
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Absorption lines and continuum features can then be added to the spectrum.
 To add a line, you must know some properties of the line: the rest wavelength,
@@ -67,8 +110,8 @@
 field from the ray data to use to calculate the column density.  The
 ``label_threshold`` keyword tells the spectrum generator to add all lines
 above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
-text line list.  If None is provided, as is the default, no lines of this
-type will be added to the text list.
+text line list output at the end.  If None is provided, as is the default,
+no lines of this type will be added to the text list.
 
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
@@ -86,7 +129,7 @@
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
--------------------
+^^^^^^^^^^^^^^^^^^^
 
 Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
@@ -95,12 +138,12 @@
 
   wavelength, flux = sp.make_spectrum('lightray.h5',
                                       output_file='spectrum.fits',
-                                      line_list_file='lines.txt',
-                                      use_peculiar_velocity=True)
+                                      line_list_file='lines.txt')
 
 A spectrum will be made using the specified ray data and the wavelength and
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to
-False, the lines will only be shifted according to the redshift.
+flux arrays will also be returned.  If you set the optional
+``use_peculiar_velocity`` keyword to False, the lines will not incorporate
+Doppler redshifts to shift the deposition of the line features.
 
 Three output file formats are supported for writing out the spectrum: fits,
 hdf5, and ascii.  The file format used is based on the extension provided
@@ -112,15 +155,16 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.
-Spectrum generation is parallelized using a multi-level strategy where each
-absorption line is deposited by a different processor.  If the number of available
-processors is greater than the number of lines, then the deposition of
-individual lines will be divided over multiple processors.
+The `AbsorptionSpectrum` analysis module can be run in parallel simply by
+following the procedures laid out in :ref:`parallel-computation` for running
+yt scripts in parallel.  Spectrum generation is parallelized using a multi-level
+strategy where each absorption line is deposited by a different processor.
+If the number of available processors is greater than the number of lines,
+then the deposition of individual lines will be divided over multiple
+processors.
 
-Fitting an Absorption Spectrum
-------------------------------
+Fitting Absorption Spectra
+==========================
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-opengl_vr:
+
+Advanced Interactive Data Visualization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to manually create all components required to
+start the Interactive Data Visualization.  For more information see 
+:ref:`interactive_data_visualization`.
+
+.. yt_cookbook:: opengl_vr.py
+
+Embedding Interactive Data Visualization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to embed the Interactive Data Visualization inside
+the Jupyter notebook.  For more information see 
+:ref:`interactive_data_visualization`.
+
+.. yt_cookbook:: opengl_ipython.py
+
 Plotting Streamlines
 ~~~~~~~~~~~~~~~~~~~~
 

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/cookbook/opengl_ipython.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_ipython.py
@@ -0,0 +1,29 @@
+import yt
+from yt.visualization.volume_rendering.interactive_vr import \
+    SceneGraph, BlockCollection, TrackballCamera
+from yt.visualization.volume_rendering.interactive_loop import \
+    RenderingContext
+from yt.visualization.volume_rendering import glfw_inputhook 
+
+rc = RenderingContext(1280, 960)
+
+scene = SceneGraph()
+collection = BlockCollection()
+
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+dd = ds.all_data()
+collection.add_data(dd, "density")
+
+scene.add_collection(collection)
+
+position = (1.0, 1.0, 1.0)
+c = TrackballCamera(position=position, focus=ds.domain_center,
+                    near_plane=0.1)
+
+callbacks = rc.setup_loop(scene, c)
+rl = rc(scene, c, callbacks)
+
+# To make this work from IPython execute:
+#
+# glfw_inputhook.inputhook_manager.enable_gui("glfw", app=rl)

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/cookbook/opengl_vr.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_vr.py
@@ -0,0 +1,27 @@
+import yt
+from yt.visualization.volume_rendering.interactive_vr import \
+    SceneGraph, BlockCollection, TrackballCamera
+from yt.visualization.volume_rendering.interactive_loop import \
+    RenderingContext
+
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create GLUT window
+rc = RenderingContext(1280, 960)
+
+# Create a 3d Texture from all_data()
+collection = BlockCollection()
+dd = ds.all_data()
+collection.add_data(dd, "density")
+
+# Initialize basic Scene and pass the data
+scene = SceneGraph()
+scene.add_collection(collection)
+
+# Create default camera
+position = (1.0, 1.0, 1.0)
+c = TrackballCamera(position=position, focus=ds.domain_center,
+                    near_plane=0.1)
+
+# Start rendering loop
+rc.start_loop(scene, c)

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -485,12 +485,12 @@
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require definition of which set of tests constitute to a given
 answer. Configuration for the integration server is stored in
-*tests/tests_2.7.yaml* in the main yt repository:
+*tests/tests.yaml* in the main yt repository:
 
 .. code-block:: yaml
 
    answer_tests:
-      local_artio_270:
+      local_artio_000:
          - yt/frontends/artio/tests/test_outputs.py
    # ...
    other_tests:
@@ -498,7 +498,7 @@
          - '-v'
          - '-s'
 
-Each element under *answer_tests* defines answer name (*local_artio_270* in above
+Each element under *answer_tests* defines answer name (*local_artio_000* in above
 snippet) and specifies a list of files/classes/methods that will be validated
 (*yt/frontends/artio/tests/test_outputs.py* in above snippet). On the testing
 server it is translated to:
@@ -506,7 +506,7 @@
 .. code-block:: bash
 
    $ nosetests --with-answer-testing --local --local-dir ... --answer-big-data \
-      --answer-name=local_artio_270 \
+      --answer-name=local_artio_000 \
       yt/frontends/artio/tests/test_outputs.py
 
 If the answer doesn't exist on the server yet, ``nosetests`` is run twice and
@@ -516,21 +516,21 @@
 ~~~~~~~~~~~~~~~~
 
 In order to regenerate answers for a particular set of tests it is sufficient to
-change the answer name in *tests/tests_2.7.yaml* e.g.:
+change the answer name in *tests/tests.yaml* e.g.:
 
 .. code-block:: diff
 
-   --- a/tests/tests_2.7.yaml
-   +++ b/tests/tests_2.7.yaml
+   --- a/tests/tests.yaml
+   +++ b/tests/tests.yaml
    @@ -25,7 +25,7 @@
         - yt/analysis_modules/halo_finding/tests/test_rockstar.py
         - yt/frontends/owls_subfind/tests/test_outputs.py
 
-   -  local_owls_270:
-   +  local_owls_271:
+   -  local_owls_000:
+   +  local_owls_001:
         - yt/frontends/owls/tests/test_outputs.py
 
-      local_pw_270:
+      local_pw_000:
 
 would regenerate answers for OWLS frontend.
 
@@ -538,20 +538,35 @@
 ~~~~~~~~~~~~~~~~~~~~~~~
 
 In order to add a new set of answer tests, it is sufficient to extend the
-*answer_tests* list in *tests/tests_2.7.yaml* e.g.:
+*answer_tests* list in *tests/tests.yaml* e.g.:
 
 .. code-block:: diff
 
-   --- a/tests/tests_2.7.yaml
-   +++ b/tests/tests_2.7.yaml
+   --- a/tests/tests.yaml
+   +++ b/tests/tests.yaml
    @@ -60,6 +60,10 @@
         - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
         - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
 
-   +  local_gdf_270:
+   +  local_gdf_000:
    +    - yt/frontends/gdf/tests/test_outputs.py
    +
    +
     other_tests:
       unittests:
 
+Restricting Python Versions for Answer Tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If for some reason a test can be run only for a specific version of python it is
+possible to indicate this by adding a ``[py2]`` or ``[py3]`` tag. For example:
+
+.. code-block:: yaml
+
+   answer_tests:
+      local_test_000:
+         - yt/test_A.py  # [py2]
+         - yt/test_B.py  # [py3]
+
+would result in ``test_A.py`` being run only for *python2* and ``test_B.py``
+being run only for *python3*.

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -244,6 +244,26 @@
 
 Option 1:
 
+Ensure that you have all build dependencies installed in your current
+conda environment:
+
+.. code-block:: bash
+
+  conda install cython mercurial sympy ipython h5py matplotlib
+
+.. note::
+  
+  If you are using a python3 environment, ``conda`` will not be able to install
+  *mercurial*, which works only with python2. You can circumvent this issue by
+  creating a dedicated python2 environment and symlinking *hg* in your current
+  environment:
+
+  .. code-block:: bash
+
+     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+     conda create -y -n py27 python=2.7 mercurial
+     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+
 Clone the yt repository with:
 
 .. code-block:: bash
@@ -347,6 +367,27 @@
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
   if you are curious why ``--prefix=`` is neccessary on some systems.
 
+.. note::
+
+   yt requires version 18.0 or higher of ``setuptools``. If you see
+   error messages about this package, you may need to update it. For
+   example, with pip via
+
+   .. code-block:: bash
+
+      pip install --upgrade setuptools
+
+   or your preferred method. If you have ``distribute`` installed, you
+   may also see error messages for it if it's out of date. You can
+   update with pip via
+
+   .. code-block:: bash
+
+      pip install --upgrade distribute
+
+   or via your preferred method.
+   
+
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX) Please refer to

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/visualizing/FITSImageData.ipynb
--- a/doc/source/visualizing/FITSImageData.ipynb
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -87,7 +87,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Because `FITSImageData` inherits from the [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) class, we can call its methods. For example, `info` shows us the contents of the virtual FITS file:"
+    "We can call a number of the [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) class's methods from a `FITSImageData` object. For example, `info` shows us the contents of the virtual FITS file:"
    ]
   },
   {

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/visualizing/_images/idv.jpg
Binary file doc/source/visualizing/_images/idv.jpg has changed

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -3,33 +3,33 @@
 Colormaps
 =========
 
-There are several colormaps available for yt.  yt includes all of the 
-matplotlib colormaps as well for nearly all functions.  Individual 
-visualization functions usually allow you to specify a colormap with the 
+There are several colormaps available for yt.  yt includes all of the
+matplotlib colormaps as well for nearly all functions.  Individual
+visualization functions usually allow you to specify a colormap with the
 ``cmap`` flag.
 
 .. _install-palettable:
 
-Palettable and ColorBrewer2 
+Palettable and ColorBrewer2
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 While colormaps that employ a variety of colors often look attractive,
 they are not always the best choice to convey information to one's audience.
-There are numerous `articles <https://eagereyes.org/basics/rainbow-color-map>`_ 
-and 
-`presentations <http://pong.tamu.edu/~kthyng/presentations/visualization.pdf>`_ 
-that discuss how rainbow-based colormaps fail with regard to black-and-white 
+There are numerous `articles <https://eagereyes.org/basics/rainbow-color-map>`_
+and
+`presentations <http://pong.tamu.edu/~kthyng/presentations/visualization.pdf>`_
+that discuss how rainbow-based colormaps fail with regard to black-and-white
 reproductions, colorblind audience members, and confusing in color ordering.
 Depending on the application, the consensus seems to be that gradients between
 one or two colors are the best way for the audience to extract information
 from one's figures.  Many such colormaps are found in palettable.
 
-If you have installed `palettable <http://jiffyclub.github.io/palettable/>`_ 
-(formerly brewer2mpl), you can also access the discrete colormaps available 
+If you have installed `palettable <http://jiffyclub.github.io/palettable/>`_
+(formerly brewer2mpl), you can also access the discrete colormaps available
 to that package including those from `colorbrewer <http://colorbrewer2.org>`_.
-Install `palettable <http://jiffyclub.github.io/palettable/>`_ with 
-``pip install palettable``.  To access these maps in yt, instead of supplying 
-the colormap name, specify a tuple of the form (name, type, number), for 
+Install `palettable <http://jiffyclub.github.io/palettable/>`_ with
+``pip install palettable``.  To access these maps in yt, instead of supplying
+the colormap name, specify a tuple of the form (name, type, number), for
 example ``('RdBu', 'Diverging', 9)``.  These discrete colormaps will
 not be interpolated, and can be useful for creating
 colorblind/printer/grayscale-friendly plots. For more information, visit
@@ -40,22 +40,22 @@
 Making and Viewing Custom Colormaps
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-yt can also accommodate custom colormaps using the 
-:func:`~yt.visualization.color_maps.make_colormap` function 
-These custom colormaps can be made to an arbitrary level of 
-complexity.  You can make these on the fly for each yt session, or you can 
-store them in your :ref:`plugin-file` for access to them in every future yt 
+yt can also accommodate custom colormaps using the
+:func:`~yt.visualization.color_maps.make_colormap` function
+These custom colormaps can be made to an arbitrary level of
+complexity.  You can make these on the fly for each yt session, or you can
+store them in your :ref:`plugin-file` for access to them in every future yt
 session.  The example below creates two custom colormaps, one that has
-three equally spaced bars of blue, white and red, and the other that 
-interpolates in increasing lengthen intervals from black to red, to green, 
-to blue.  These will be accessible for the rest of the yt session as 
-'french_flag' and 'weird'.  See 
-:func:`~yt.visualization.color_maps.make_colormap` and 
+three equally spaced bars of blue, white and red, and the other that
+interpolates in intervals of increasing length from black to red, to green,
+to blue.  These will be accessible for the rest of the yt session as
+'french_flag' and 'weird'.  See
+:func:`~yt.visualization.color_maps.make_colormap` and
 :func:`~yt.visualization.color_maps.show_colormaps` for more details.
 
 .. code-block:: python
 
-    yt.make_colormap([('blue', 20), ('white', 20), ('red', 20)], 
+    yt.make_colormap([('blue', 20), ('white', 20), ('red', 20)],
                      name='french_flag', interpolate=False)
     yt.make_colormap([('black', 5), ('red', 10), ('green', 20), ('blue', 0)],
                      name='weird', interpolate=True)
@@ -66,7 +66,7 @@
 
 This is a chart of all of the yt and matplotlib colormaps available.  In
 addition to each colormap displayed here, you can access its "reverse" by simply
-appending a ``"_r"`` to the end of the colormap name.  
+appending a ``"_r"`` to the end of the colormap name.
 
 .. image:: ../_images/all_colormaps.png
    :width: 512
@@ -80,7 +80,7 @@
 Displaying Colormaps Locally
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To display the most up to date colormaps locally, you can use the 
+To display the most up to date colormaps locally, you can use the
 :func:`~yt.visualization.color_maps.show_colormaps` function.  By default,
 you'll see every colormap available to you, but you can specify subsets
 of colormaps to display, either as just the ``yt_native`` colormaps, or
@@ -98,7 +98,7 @@
 
     import yt
     yt.show_colormaps(subset=['algae', 'kamae', 'spectral',
-                              'arbre', 'dusk', 'octarine', 'kelp'], 
+                              'arbre', 'dusk', 'octarine', 'kelp'],
                       filename="yt_native.png")
 
 Applying a Colormap to your Rendering
@@ -111,7 +111,7 @@
 
     yt.write_image(im, "output.png", cmap_name = 'jet')
 
-If you're using the Plot Window interface (e.g. SlicePlot, ProjectionPlot, 
+If you're using the Plot Window interface (e.g. SlicePlot, ProjectionPlot,
 etc.), it's even easier than that.  Simply create your rendering, and you
 can quickly swap the colormap on the fly after the fact with the ``set_cmap``
 callback:
@@ -127,16 +127,16 @@
     p.set_cmap(field="density", cmap='hot')
     p.save('proj_with_hot_cmap.png')
 
-For more information about the callbacks available to Plot Window objects, 
+For more information about the callbacks available to Plot Window objects,
 see :ref:`callbacks`.
 
 Examples of Each Colormap
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 To give the reader a better feel for how a colormap appears once it is applied
-to a dataset, below we provide a library of identical projections of an 
-isolated galaxy where only the colormap has changed.  They use the sample 
-dataset "IsolatedGalaxy" available at 
+to a dataset, below we provide a library of identical projections of an
+isolated galaxy where only the colormap has changed.  They use the sample
+dataset "IsolatedGalaxy" available at
 `http://yt-project.org/data <http://yt-project.org/data>`_.
 
 .. yt_colormaps:: cmap_images.py

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,6 +16,7 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
+   interactive_data_visualization
    sketchfab
    mapserver
    streamlines

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e doc/source/visualizing/interactive_data_visualization.rst
--- /dev/null
+++ b/doc/source/visualizing/interactive_data_visualization.rst
@@ -0,0 +1,102 @@
+.. _interactive_data_visualization:
+
+Interactive Data Visualization
+==============================
+
+In version 3.3 of yt, an experimental, hardware-accelerated interactive volume
+renderer was introduced.  This interactive renderer is based on OpenGL and
+natively understands adaptive mesh refinement data; this enables
+(GPU) memory-efficient loading of data.  The data is copied from CPU memory
+onto the GPU as a series of 3D textures, which are then rendered to an
+interactive window.  The window itself is the view from a conceptual "camera",
+which can be rotated, zoomed, and so on.  The color of each displayed pixel is
+computed by a "fragment shader" which is executed on each grid that is
+displayed.  The fragment shaders currently implemented in yt enable computing
+(and then mapping to a colormap) the maximum value along each pixel's line of
+sight and an unweighted integration of values along each pixel's line of sight
+(and subsequent mapping to a colormap.)  An experimental transfer function
+shader has been implemented, but is not yet functioning correctly.  For more
+information, see :ref:`projection-types`.
+
+A comprehensive description of the OpenGL volume rendering is beyond the scope
+of this document. However, a more detailed explanation can be found in `this
+guide <https://open.gl/>`_.
+
+Much of the Interactive Data Visualization (IDV) interface is designed to
+mimic the interface available for software volume rendering (see
+:ref:`volume_rendering`) so that in future versions API compatibility may lead
+to greater code reuse both for scripts that create visualizations and for
+internal visualization objects.
+
+Installation
+^^^^^^^^^^^^
+
+In order to use Interactive Data Visualization (IDV) you need to install
+`PyOpenGL <https://pypi.python.org/pypi/PyOpenGL>`_ and `cyglfw3
+<https://pypi.python.org/pypi/cyglfw3/>`_ along with their respective
+dependencies, e.g. `glfw3 <http://www.glfw.org/>`_ is required to be installed
+before you can ``pip install cyglfw3``. Please carefully read installation
+instructions provided on the PyPI pages of both packages.
+
+If you are using conda, ``cyglfw3`` is provided in our conda channel
+(``pyopengl`` is shipped by Continuum already) and can be installed via:
+
+.. code-block:: bash
+
+    conda install -c http://use.yt/with_conda/ cyglfw3 pyopengl
+
+Using the interactive renderer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can simply pass a dataset to :meth:`~yt.interactive_render`. By default
+it will load all of the data and render the gas density:
+
+.. code-block:: python
+
+    import yt
+    
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    yt.interactive_render(ds)
+
+Alternatively, you can provide a data object as the first argument to
+:meth:`~yt.interactive_render` if your dataset is too big to fit into GPU memory:
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    sp = ds.sphere("max", (0.1, "Mpc"))
+
+    cam_pos = ds.arr([0.1, 0.1, 0.1], "Mpc").in_units("code_length")
+    yt.interactive_render(sp, field="pressure", cam_position=cam_pos,
+                          window_size=(512, 512))
+
+A successful call to :meth:`~yt.interactive_render` should create a new window
+called *vol_render*. 
+
+.. image:: _images/idv.jpg
+   :width: 1000
+
+By default it renders a Maximum Intensity Projection of the density field (see
+:ref:`projection-types` for more information). The rendering can be
+dynamically modified using the following keybindings:
+
+1
+   Switch to MIP fragment shader
+2
+   Switch to integration fragment shader
+L
+   Switch between linear and logarithmic scales
+W
+   Zoom in the camera
+S
+   Zoom out the camera
+C
+   Change the colormap
+
+Pressing the *h* key will print all available key bindings in the terminal
+window.  The camera can be moved around by holding the left mouse button while
+moving the mouse.
+
+A more advanced initialization of the interactive volume renderer can be
+found in :ref:`cookbook-opengl_vr`.

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,10 @@
         files += glob.glob("%s/*.%s" % (dir_name, ext))
     MAPSERVER_FILES.append((dir_name, files))
 
+SHADERS_DIR = os.path.join("yt", "visualization", "volume_rendering", "shaders")
+SHADERS_FILES = glob.glob(os.path.join(SHADERS_DIR, "*.vertexshader")) + \
+    glob.glob(os.path.join(SHADERS_DIR, "*.fragmentshader"))
+
 VERSION = "3.3.dev0"
 
 if os.path.exists('MANIFEST'):
@@ -41,51 +45,56 @@
 else:
     omp_args = None
 
+if os.name == "nt":
+    std_libs = []
+else:
+    std_libs = ["m"]
 
 cython_extensions = [
     Extension("yt.analysis_modules.photon_simulator.utils",
               ["yt/analysis_modules/photon_simulator/utils.pyx"]),
     Extension("yt.analysis_modules.ppv_cube.ppv_utils",
               ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
-              libraries=["m"]),
+              libraries=std_libs),
     Extension("yt.geometry.grid_visitors",
               ["yt/geometry/grid_visitors.pyx"],
               include_dirs=["yt/utilities/lib"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
     Extension("yt.geometry.grid_container",
               ["yt/geometry/grid_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_container.pxd",
                        "yt/geometry/grid_visitors.pxd"]),
     Extension("yt.geometry.oct_container",
-              ["yt/geometry/oct_container.pyx"],
+              ["yt/geometry/oct_container.pyx",
+               "yt/utilities/lib/tsearch.c"],
               include_dirs=["yt/utilities/lib"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.oct_visitors",
               ["yt/geometry/oct_visitors.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.particle_oct_container",
               ["yt/geometry/particle_oct_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.selection_routines",
               ["yt/geometry/selection_routines.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
                        "yt/geometry/oct_container.pxd",
@@ -96,7 +105,7 @@
     Extension("yt.geometry.particle_deposit",
               ["yt/geometry/particle_deposit.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -104,7 +113,7 @@
     Extension("yt.geometry.particle_smooth",
               ["yt/geometry/particle_smooth.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd",
@@ -113,28 +122,30 @@
     Extension("yt.geometry.fake_octree",
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/oct_container.pxd",
                        "yt/geometry/selection_routines.pxd"]),
     Extension("yt.utilities.spatial.ckdtree",
               ["yt/utilities/spatial/ckdtree.pyx"],
-              libraries=["m"]),
+              include_dirs=["yt/utilities/lib/"],
+              libraries=std_libs),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
-              libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/amr_kdtools.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
@@ -144,12 +155,12 @@
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
     Extension("yt.utilities.lib.marching_cubes",
               ["yt/utilities/lib/marching_cubes.pyx",
                "yt/utilities/lib/fixed_interpolator.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/utilities/lib/fixed_interpolator.pxd",
                        "yt/utilities/lib/fixed_interpolator.h",
@@ -159,7 +170,7 @@
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
               language="c++",
-              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.origami",
@@ -172,7 +183,7 @@
                "yt/utilities/lib/fixed_interpolator.c",
                "yt/utilities/lib/kdtree.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=["m"],
+              libraries=std_libs,
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               depends=["yt/utilities/lib/fp_utils.pxd",
@@ -183,10 +194,10 @@
                        "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
-              libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
     Extension("yt.utilities.lib.alt_ray_tracers",
               ["yt/utilities/lib/alt_ray_tracers.pyx"],
-              libraries=["m"]),
+              libraries=std_libs),
 ]
 
 lib_exts = [
@@ -199,7 +210,7 @@
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]))
+                  libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -211,7 +222,7 @@
     Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF",
               ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c",
                "yt/analysis_modules/halo_finding/fof/kd.c"],
-              libraries=["m"]),
+              libraries=std_libs),
     Extension("yt.analysis_modules.halo_finding.hop.EnzoHop",
               glob.glob("yt/analysis_modules/halo_finding/hop/*.c")),
     Extension("yt.frontends.artio._artio_caller",
@@ -229,10 +240,10 @@
               glob.glob("yt/utilities/spatial/src/*.c")),
     Extension("yt.visualization._MPL",
               ["yt/visualization/_MPL.c"],
-              libraries=["m"]),
+              libraries=std_libs),
     Extension("yt.utilities.data_point_utilities",
               ["yt/utilities/data_point_utilities.c"],
-              libraries=["m"]),
+              libraries=std_libs),
 ]
 
 # EMBREE
@@ -263,7 +274,7 @@
         conda_basedir = os.path.dirname(os.path.dirname(sys.executable))
         embree_inc_dir.append(os.path.join(conda_basedir, 'include'))
         embree_lib_dir.append(os.path.join(conda_basedir, 'lib'))
-        
+
     if _platform == "darwin":
         embree_lib_name = "embree.2"
     else:
@@ -273,7 +284,8 @@
         ext.include_dirs += embree_inc_dir
         ext.library_dirs += embree_lib_dir
         ext.language = "c++"
-        ext.libraries += ["m", embree_lib_name]
+        ext.libraries += std_libs
+        ext.libraries += [embree_lib_name]
 
     cython_extensions += embree_extensions
 
@@ -399,7 +411,7 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES,
+    data_files=MAPSERVER_FILES + SHADERS_FILES,
     ext_modules=cython_extensions + extensions
 )
 

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -138,6 +138,10 @@
         import hglib
     except ImportError:
         return None
-    with hglib.open(target_dir) as repo:
-        changeset = repo.identify(id=True, branch=True).strip().decode('utf8')
+    try:
+        with hglib.open(target_dir) as repo:
+            changeset = repo.identify(
+                id=True, branch=True).strip().decode('utf8')
+    except hglib.error.ServerError:
+        return None
     return changeset

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -7,7 +7,6 @@
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
-
 class NoseWorker(multiprocessing.Process):
 
     def __init__(self, task_queue, result_queue):
@@ -54,10 +53,19 @@
 
 
 def generate_tasks_input():
+    pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor)
+    if sys.version_info < (3, 0, 0):
+        DROP_TAG = "py3"
+    else:
+        DROP_TAG = "py2"
+
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
-    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
-        tests = yaml.load(obj)
+    with open('tests/tests.yaml', 'r') as obj:
+        lines = obj.read()
+    data = '\n'.join([line for line in lines.split('\n')
+                      if DROP_TAG not in line])
+    tests = yaml.load(data)
 
     base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
@@ -66,9 +74,11 @@
     for test in list(tests["other_tests"].keys()):
         args.append([test] + tests["other_tests"][test])
     for answer in list(tests["answer_tests"].keys()):
-        argv = [answer]
+        if tests["answer_tests"][answer] is None:
+            continue
+        argv = ["{}_{}".format(pyver, answer)]
         argv += base_argv
-        argv.append('--answer-name=%s' % answer)
+        argv.append('--answer-name=%s' % argv[0])
         argv += tests["answer_tests"][answer]
         args.append(argv)
 

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e tests/tests.yaml
--- /dev/null
+++ b/tests/tests.yaml
@@ -0,0 +1,73 @@
+answer_tests:
+  local_artio_000:
+    - yt/frontends/artio/tests/test_outputs.py
+
+  local_athena_000:
+    - yt/frontends/athena
+
+  local_chombo_000:
+    - yt/frontends/chombo/tests/test_outputs.py
+
+  local_enzo_000:
+    - yt/frontends/enzo
+
+  local_fits_000:
+    - yt/frontends/fits/tests/test_outputs.py
+
+  local_flash_000:
+    - yt/frontends/flash/tests/test_outputs.py
+
+  local_gadget_000:
+    - yt/frontends/gadget/tests/test_outputs.py
+
+  local_gdf_000:
+    - yt/frontends/gdf/tests/test_outputs.py
+
+  local_halos_000:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
+  
+  local_owls_000:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_000:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_000:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_000:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
+
+  local_orion_000:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_000:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_000:
+    - yt/frontends/ytdata
+
+  local_absorption_spectrum_000:
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-answer_tests:
-  local_artio_270:
-    - yt/frontends/artio/tests/test_outputs.py
-
-  local_athena_270:
-    - yt/frontends/athena
-
-  local_chombo_271:
-    - yt/frontends/chombo/tests/test_outputs.py
-
-  local_enzo_270:
-    - yt/frontends/enzo
-
-  local_fits_270:
-    - yt/frontends/fits/tests/test_outputs.py
-
-  local_flash_270:
-    - yt/frontends/flash/tests/test_outputs.py
-
-  local_gadget_270:
-    - yt/frontends/gadget/tests/test_outputs.py
-
-  local_gdf_270:
-    - yt/frontends/gdf/tests/test_outputs.py
-
-  local_halos_270:
-    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-    - yt/frontends/owls_subfind/tests/test_outputs.py
-  
-  local_owls_270:
-    - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_271:
-    - yt/visualization/tests/test_plotwindow.py:test_attributes
-    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
-    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
-  local_tipsy_270:
-    - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_274:
-    - yt/analysis_modules/radmc3d_export
-    - yt/frontends/moab/tests/test_c5.py
-    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-    - yt/visualization/volume_rendering/tests/test_mesh_render.py
-
-  local_orion_270:
-    - yt/frontends/boxlib/tests/test_orion.py
-  
-  local_ramses_270:
-    - yt/frontends/ramses/tests/test_outputs.py
-  
-  local_ytdata_270:
-    - yt/frontends/ytdata
-
-  local_absorption_spectrum_271:
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
-
-other_tests:
-  unittests:
-     - '-v'
-  cookbook:
-     - '-v'
-     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e tests/tests_3.5.yaml
--- a/tests/tests_3.5.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-answer_tests:
-  local_artio_350:
-    - yt/frontends/artio/tests/test_outputs.py
-
-  local_athena_350:
-    - yt/frontends/athena
-
-  local_chombo_350:
-    - yt/frontends/chombo/tests/test_outputs.py
-
-  local_enzo_350:
-    - yt/frontends/enzo
-
-  local_fits_350:
-    - yt/frontends/fits/tests/test_outputs.py
-
-  local_flash_350:
-    - yt/frontends/flash/tests/test_outputs.py
-
-  local_gadget_350:
-    - yt/frontends/gadget/tests/test_outputs.py
-
-  local_gdf_350:
-    - yt/frontends/gdf/tests/test_outputs.py
-
-  local_halos_350:
-    - yt/frontends/owls_subfind/tests/test_outputs.py
-    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
-    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
-  
-  local_owls_350:
-    - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_350:
-    - yt/visualization/tests/test_plotwindow.py:test_attributes
-    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
-    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
-  local_tipsy_350:
-    - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_351:
-    - yt/analysis_modules/radmc3d_export
-    - yt/frontends/moab/tests/test_c5.py
-    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-    - yt/visualization/volume_rendering/tests/test_mesh_render.py
-
-  local_orion_350:
-    - yt/frontends/boxlib/tests/test_orion.py
-  
-  local_ramses_350:
-    - yt/frontends/ramses/tests/test_outputs.py
-  
-  local_ytdata_350:
-    - yt/frontends/ytdata
-
-  local_absorption_spectrum_350:
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
-
-other_tests:
-  unittests:
-     - '-v'
-  cookbook:
-     - '-v'
-     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -159,7 +159,7 @@
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \
-    off_axis_projection
+    off_axis_projection, interactive_render
 import yt.visualization.volume_rendering.api as volume_rendering
 #    TransferFunctionHelper, MultiVariateTransferFunction
 #    off_axis_projection

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -28,7 +28,4 @@
     HaloFinder, \
     LoadHaloes, \
     LoadTextHalos, \
-    LoadTextHaloes, \
-    RockstarHalo, \
-    RockstarHaloList, \
-    LoadRockstarHalos
+    LoadTextHaloes

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -28,14 +28,20 @@
 Py_EnzoFOF(PyObject *obj, PyObject *args)
 {
     PyObject    *oxpos, *oypos, *ozpos;
-
     PyArrayObject    *xpos, *ypos, *zpos;
-    xpos=ypos=zpos=NULL;
     float link = 0.2;
     float fPeriod[3] = {1.0, 1.0, 1.0};
 	int nMembers = 8;
+    int i, num_particles;
+	KDFOF kd;
+	int nBucket,j;
+	float fEps;
+	int nGroup,bVerbose=1;
+	int sec,usec;
+	PyArrayObject *particle_group_id;
+    PyObject *return_value;
 
-    int i;
+    xpos=ypos=zpos=NULL;
 
     if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
         &oxpos, &oypos, &ozpos, &link,
@@ -54,7 +60,7 @@
              "EnzoFOF: xpos didn't work.");
     goto _fail;
     }
-    int num_particles = PyArray_SIZE(xpos);
+    num_particles = PyArray_SIZE(xpos);
 
     ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
@@ -75,12 +81,6 @@
     }
 
     /* let's get started with the FOF stuff */
-
-	KDFOF kd;
-	int nBucket,j;
-	float fEps;
-	int nGroup,bVerbose=1;
-	int sec,usec;
 	
 	/* linking length */
 	fprintf(stdout, "Link length is %f\n", link);
@@ -128,7 +128,7 @@
     // All we need to do is group information.
     
     // Tags are in kd->p[i].iGroup
-    PyArrayObject *particle_group_id = (PyArrayObject *)
+    particle_group_id = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_INT32));
     
@@ -142,7 +142,7 @@
 
     PyArray_UpdateFlags(particle_group_id,
         NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
-    PyObject *return_value = Py_BuildValue("N", particle_group_id);
+    return_value = Py_BuildValue("N", particle_group_id);
 
     Py_DECREF(xpos);
     Py_DECREF(ypos);

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -1,11 +1,11 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <sys/time.h>
-#if defined(WIN32) || defined(WIN64) 
-#include <windows.h> 
+#ifdef _WIN32
+#include <windows.h>
 #else
 #include <sys/resource.h>
+#include <sys/time.h>
 #endif
 #include <assert.h>
 #include "kd.h"
@@ -15,7 +15,7 @@
 void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
 {
 
-#if defined(WIN32) || defined(WIN64)
+#ifdef _WIN32
         int secs, usecs;
         HANDLE hProcess = GetCurrentProcess();
 	FILETIME ftCreation, ftExit, ftKernel, ftUser;

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -17,8 +17,6 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 import math
 import numpy as np
-import glob
-import os
 import os.path as path
 from functools import cmp_to_key
 from yt.extern.six import add_metaclass
@@ -560,126 +558,6 @@
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
-class RockstarHalo(Halo):
-    _name = "RockstarHalo"
-    # See particle_mask
-    _radjust = 4.
-
-    def maximum_density(self):
-        r"""Not implemented."""
-        return -1
-
-    def maximum_density_location(self):
-        r"""Not implemented."""
-        return self.center_of_mass()
-
-    def write_particle_list(self,handle):
-        r"""Not implemented."""
-        return -1
-
-    def virial_mass(self):
-        r"""Virial mass in Msun/h"""
-        return self.supp['m']
-
-    def virial_radius(self):
-        r"""Virial radius in Mpc/h comoving"""
-        return self.supp['r']
-
-    def __getitem__(self, key):
-        # This function will try to get particle data in one of three ways,
-        # in descending preference.
-        # 1. From saved_fields, e.g. we've already got it.
-        # 2. From the halo binary files off disk.
-        # 3. Use the unique particle indexes of the halo to select a missing
-        # field from a Sphere.
-        if key in self._saved_fields:
-            # We've already got it.
-            return self._saved_fields[key]
-        # Gotta go get it from the Rockstar binary file.
-        if key == 'particle_index':
-            IDs = self._get_particle_data(self.supp['id'],
-                self.halo_list.halo_to_fname, self.size, key)
-            IDs = IDs[IDs.argsort()]
-            self._saved_fields[key] = IDs
-            return self._saved_fields[key]
-        # We won't store this field below in saved_fields because
-        # that would mean keeping two copies of it, one in the yt
-        # machinery and one here.
-        ds = self.ds.sphere(self.CoM, 4 * self.max_radius)
-        return np.take(ds[key][self._ds_sort], self.particle_mask)
-
-    def _get_particle_data(self, halo, fnames, size, field):
-        # Given a list of file names, a halo, its size, and the desired field,
-        # this returns the particle indices for that halo.
-        file = fnames[halo]
-        mylog.info("Getting %d particles from Rockstar binary file %s.", self.supp['num_p'], file)
-        fp = open(file, 'rb')
-        # We need to skip past the header and all the halos.
-        fp.seek(self.halo_list._header_dt.itemsize + \
-            self.halo_list.fname_halos[file] * \
-            self.halo_list._halo_dt.itemsize, os.SEEK_CUR)
-        # Now we skip ahead to where this halos particles begin.
-        fp.seek(self.supp['p_start'] * 8, os.SEEK_CUR)
-        # And finally, read in the ids.
-        IDs = np.fromfile(fp, dtype=np.int64, count=self.supp['num_p'])
-        fp.close()
-        return IDs
-
-    def get_ellipsoid_parameters(self):
-        r"""Calculate the parameters that describe the ellipsoid of
-        the particles that constitute the halo.
-
-        Parameters
-        ----------
-        None
-
-        Returns
-        -------
-        tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
-            The 6-tuple has in order:
-              #. The center of mass as an array.
-              #. mag_A as a float.
-              #. mag_B as a float.
-              #. mag_C as a float.
-              #. e0_vector as an array.
-              #. tilt as a float.
-
-        Examples
-        --------
-        >>> params = halos[0].get_ellipsoid_parameters()
-        """
-        basic_parameters = self._get_ellipsoid_parameters_basic()
-        toreturn = [self.center_of_mass()]
-        updated = [basic_parameters[0], basic_parameters[1],
-            basic_parameters[2], np.array([basic_parameters[3],
-            basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
-        toreturn.extend(updated)
-        return tuple(toreturn)
-
-    def get_ellipsoid(self):
-        r"""Returns an ellipsoidal data object.
-
-        This will generate a new, empty ellipsoidal data object for this
-        halo.
-
-        Parameters
-        ----------
-        None.
-
-        Returns
-        -------
-        ellipsoid : `yt.data_objects.data_containers.YTEllipsoid`
-            The ellipsoidal data object.
-
-        Examples
-        --------
-        >>> ell = halos[0].get_ellipsoid()
-        """
-        ep = self.get_ellipsoid_parameters()
-        ell = self.data.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3],
-            ep[4], ep[5])
-        return ell
-
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -1126,134 +1004,6 @@
             f.flush()
         f.close()
 
-class RockstarHaloList(HaloList):
-    _name = "Rockstar"
-    _halo_class = RockstarHalo
-    # see io_internal.h in Rockstar.
-    BINARY_HEADER_SIZE=256
-    _header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
-        ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
-        ('Ol', np.float32), ('h0', np.float32),
-        ('bounds', (np.float32, 6)), ('num_halos', np.int64),
-        ('num_particles', np.int64), ('box_size', np.float32),
-        ('particle_mass', np.float32), ('particle_type', np.int64),
-        ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
-    # see halo.h.
-    _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
-        ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
-        ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
-        ('vmax_r', np.float32),
-        ('mgrav', np.float32), ('vmax', np.float32),
-        ('rvmax', np.float32), ('rs', np.float32),
-        ('klypin_rs', np.float32),
-        ('vrms', np.float32), ('J', (np.float32, 3)),
-        ('energy', np.float32), ('spin', np.float32),
-        ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
-        ('Voff', np.float32), ('b_to_a', np.float32),
-        ('c_to_a', np.float32), ('A', (np.float32, 3)),
-        ('bullock_spin', np.float32), ('kin_to_pot', np.float32),
-        ('num_p', np.int64),
-        ('num_child_particles', np.int64), ('p_start', np.int64),
-        ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
-        ('min_pos_err', np.float32), ('min_vel_err', np.float32),
-        ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
-    # Above, padding* are due to c byte ordering which pads between
-    # 4 and 8 byte values in the struct as to not overlap memory registers.
-    _tocleanup = ['padding2']
-
-    def __init__(self, ds, out_list):
-        ParallelAnalysisInterface.__init__(self)
-        mylog.info("Initializing Rockstar List")
-        self._data_source = None
-        self._groups = []
-        self._max_dens = -1
-        self.ds = ds
-        self.redshift = ds.current_redshift
-        self.out_list = out_list
-        self._data_source = ds.all_data()
-        mylog.info("Parsing Rockstar halo list")
-        self._parse_output()
-        mylog.info("Finished %s"%out_list)
-
-    def _run_finder(self):
-        pass
-
-    def __obtain_particles(self):
-        pass
-
-    def _get_dm_indices(self):
-        pass
-
-    def _get_halos_binary(self, files):
-        """
-        Parse the binary files to get information about halos in higher
-        precision than the text file.
-        """
-        halos = None
-        self.halo_to_fname = {}
-        self.fname_halos = {}
-        for file in files:
-            fp = open(file, 'rb')
-            # read the header
-            header = np.fromfile(fp, dtype=self._header_dt, count=1)
-            # read the halo information
-            new_halos = np.fromfile(fp, dtype=self._halo_dt,
-                count=header['num_halos'])
-            # Record which binary file holds these halos.
-            for halo in new_halos['id']:
-                self.halo_to_fname[halo] = file
-            # Record how many halos are stored in each binary file.
-            self.fname_halos[file] = header['num_halos']
-            # Add to existing.
-            if halos is not None:
-                halos = np.concatenate((new_halos, halos))
-            else:
-                halos = new_halos.copy()
-            fp.close()
-        # Sort them by mass.
-        halos.sort(order='m')
-        halos = np.flipud(halos)
-        return halos
-
-    def _parse_output(self):
-        """
-        Read the out_*.list text file produced
-        by Rockstar into memory."""
-
-        ds = self.ds
-        # In order to read the binary data, we need to figure out which
-        # binary files belong to this output.
-        basedir = os.path.dirname(self.out_list)
-        s = self.out_list.split('_')[-1]
-        s = s.rstrip('.list')
-        n = int(s)
-        fglob = path.join(basedir, 'halos_%d.*.bin' % n)
-        files = glob.glob(fglob)
-        halos = self._get_halos_binary(files)
-        length = 1.0 / ds['Mpchcm']
-        conv = dict(pos = np.array([length, length, length,
-                                    1, 1, 1]), # to unitary
-                    r=1.0/ds['kpchcm'], # to unitary
-                    rs=1.0/ds['kpchcm'], # to unitary
-                    )
-        #convert units
-        for name in self._halo_dt.names:
-            halos[name]=halos[name]*conv.get(name,1)
-        # Store the halos in the halo list.
-        for i, row in enumerate(halos):
-            supp = {name:row[name] for name in self._halo_dt.names}
-            # Delete the padding columns. 'supp' below will contain
-            # repeated information, but that's OK.
-            for item in self._tocleanup: del supp[item]
-            halo = RockstarHalo(self, i, size=row['num_p'],
-                CoM=row['pos'][0:3], group_total_mass=row['m'],
-                max_radius=row['r'], bulk_vel=row['bulkvel'],
-                rms_vel=row['vrms'], supp=supp)
-            self._groups.append(halo)
-
-    def write_particle_list(self):
-        pass
-
 class HOPHaloList(HaloList):
     """
     Run hop on *data_source* with a given density *threshold*.  If
@@ -1961,22 +1711,3 @@
         TextHaloList.__init__(self, ds, filename, columns, comment)
 
 LoadTextHalos = LoadTextHaloes
-
-class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
-    r"""Load Rockstar halos off disk from Rockstar-output format.
-
-    Parameters
-    ----------
-    fname : String
-        The name of the Rockstar file to read in. Default =
-        "rockstar_halos/out_0.list'.
-
-    Examples
-    --------
-    >>> ds = load("data0005")
-    >>> halos = LoadRockstarHalos(ds, "other_name.out")
-    """
-    def __init__(self, ds, filename = None):
-        if filename is None:
-            filename = 'rockstar_halos/out_0.list'
-        RockstarHaloList.__init__(self, ds, filename)

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -37,6 +37,7 @@
     PyArrayObject **xpos, PyArrayObject **ypos, PyArrayObject **zpos,
       PyArrayObject **mass)
 {
+    int num_particles;
 
     /* First the regular source arrays */
 
@@ -48,7 +49,7 @@
              "EnzoHop: xpos didn't work.");
     return -1;
     }
-    int num_particles = PyArray_SIZE(*xpos);
+    num_particles = PyArray_SIZE(*xpos);
 
     *ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
@@ -90,19 +91,25 @@
 
     PyArrayObject    *xpos, *ypos, *zpos,
                      *mass;
-    xpos=ypos=zpos=mass=NULL;
     npy_float64 totalmass = 0.0;
     float normalize_to = 1.0;
     float thresh = 160.0;
+    int i, num_particles;
+    KD kd;
+    int nBucket = 16, kdcount = 0;
+    PyArrayObject *particle_density;
+    HC my_comm;
+    PyArrayObject *particle_group_id;
+    PyObject *return_value;
 
-    int i;
+    xpos=ypos=zpos=mass=NULL;
 
     if (!PyArg_ParseTuple(args, "OOOO|ff",
         &oxpos, &oypos, &ozpos, &omass, &thresh, &normalize_to))
     return PyErr_Format(_HOPerror,
             "EnzoHop: Invalid parameters.");
 
-    int num_particles = convert_particle_arrays(
+    num_particles = convert_particle_arrays(
             oxpos, oypos, ozpos, omass,
             &xpos, &ypos, &zpos, &mass);
     if (num_particles < 0) goto _fail;
@@ -113,8 +120,6 @@
 
   /* initialize the kd hop structure */
 
-  KD kd;
-  int nBucket = 16, kdcount = 0;
   kdInit(&kd, nBucket);
   kd->nActive = num_particles;
   kd->p = malloc(sizeof(PARTICLE)*num_particles);
@@ -124,7 +129,7 @@
   }
   
  	/* Copy positions into kd structure. */
-    PyArrayObject *particle_density = (PyArrayObject *)
+    particle_density = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_FLOAT64));
 
@@ -137,7 +142,6 @@
     kd->totalmass = totalmass;
 	for (i = 0; i < num_particles; i++) kd->p[i].np_index = i;
 
-    HC my_comm;
     my_comm.s = newslice();
     my_comm.gl = (Grouplist*)malloc(sizeof(Grouplist));
     if(my_comm.gl == NULL) {
@@ -159,7 +163,7 @@
     // All we need to do is provide density and group information.
     
     // Tags (as per writetagsf77) are in gl.s->ntag+1 and there are gl.s->numlist of them.
-    PyArrayObject *particle_group_id = (PyArrayObject *)
+    particle_group_id = (PyArrayObject *)
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
                     PyArray_DescrFromType(NPY_INT32));
     
@@ -175,7 +179,7 @@
 
     PyArray_UpdateFlags(particle_density, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_density));
     PyArray_UpdateFlags(particle_group_id, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
-    PyObject *return_value = Py_BuildValue("NN", particle_density, particle_group_id);
+    return_value = Py_BuildValue("NN", particle_density, particle_group_id);
 
     Py_DECREF(xpos);
     Py_DECREF(ypos);
@@ -231,6 +235,8 @@
                              "nbuckets", "norm", NULL};
     PyObject    *oxpos, *oypos, *ozpos,
                 *omass;
+    npy_float64 totalmass = 0.0;
+
     self->xpos=self->ypos=self->zpos=self->mass=NULL;
 
 
@@ -257,7 +263,6 @@
             PyArray_SimpleNewFromDescr(1, PyArray_DIMS(self->xpos),
                     PyArray_DescrFromType(NPY_FLOAT64));
 
-    npy_float64 totalmass = 0.0;
     for(i= 0; i < self->num_particles; i++) {
         self->kd->p[i].np_index = i;
         *(npy_float64*)(PyArray_GETPTR1(self->densities, i)) = 0.0;
@@ -319,7 +324,8 @@
 
 static PyObject *
 kDTreeType_median_jst(kDTreeType *self, PyObject *args) {
-    int d, l, u;
+    int d, l, u, median;
+    PyObject *omedian;
 
     if (!PyArg_ParseTuple(args, "iii", &d, &l, &u))
         return PyErr_Format(_HOPerror,
@@ -337,9 +343,9 @@
         return PyErr_Format(_HOPerror,
             "kDTree.median_jst: u cannot be >= num_particles!");
 
-    int median = kdMedianJst(self->kd, d, l, u);
+    median = kdMedianJst(self->kd, d, l, u);
 
-    PyObject *omedian = PyLong_FromLong((long)median);
+    omedian = PyLong_FromLong((long)median);
     return omedian;
 }
 

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -15,6 +15,9 @@
  
 #include <stdio.h>
 #include <stdlib.h>
+#ifdef _WIN32
+#define _USE_MATH_DEFINES
+#endif
 #include <math.h>
 #include <string.h>
 #include <ctype.h>
@@ -551,13 +554,13 @@
 {
     int j, den;
     Boundary *hp;
- 
+    int nb = 0;
+
     my_comm->gdensity = vector(0,smx->nGroups-1);
     for (j=0;j<smx->nGroups;j++) {
         den = smx->densestingroup[j];
 	    my_comm->gdensity[j]=NP_DENS(smx->kd, den);
     }
-    int nb = 0;
     for (j=0, hp=smx->hash;j<smx->nHashLength; j++,hp++)
 	if (hp->nGroup1>=0)nb++;
     my_comm->ngroups = smx->nGroups;

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -12,10 +12,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include <sys/time.h>
-#if defined(WIN32) || defined(WIN64) 
+#ifdef _WIN32
 #include <windows.h> 
 #else
+#include <sys/time.h>
 #include <sys/resource.h>
 #endif
 #include <assert.h>
@@ -31,7 +31,7 @@
 void kdTime(KD kd,int *puSecond,int *puMicro)
 {
 
-#if defined(WIN32) || defined(WIN64)
+#ifdef _WIN32
         int secs, usecs;
         HANDLE hProcess = GetCurrentProcess();
 	FILETIME ftCreation, ftExit, ftKernel, ftUser;

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/hop/hop_regroup.c
--- a/yt/analysis_modules/halo_finding/hop/hop_regroup.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_regroup.c
@@ -411,6 +411,10 @@
     FILE *fp;
     FILE *boundfp;
     float *gdensity = my_comm->gdensity;
+    int *g1temp,*g2temp;
+    float *denstemp;
+    int temppos = 0;
+
     ngroups = my_comm->ngroups;
 
     if (densthresh<MINDENS) densthresh=MINDENS;
@@ -446,13 +450,11 @@
        the arrays should be no larger than my_comm->nb. 
        Skory.
     */
-    int *g1temp,*g2temp;
-    float *denstemp;
+
     g1temp = (int *)malloc(sizeof(int) * my_comm->nb);
     g2temp = (int *)malloc(sizeof(int) * my_comm->nb);
     denstemp = (float *)malloc(sizeof(float) * my_comm->nb);
     
-    int temppos = 0;
     for(j=0;j<(my_comm->nb);j++) {
     g1 = my_comm->g1vec[j];
     g2 = my_comm->g2vec[j];

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/hop/hop_smooth.c
--- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_smooth.c
@@ -15,6 +15,9 @@
  
 #include <stdio.h>
 #include <stdlib.h>
+#ifdef _WIN32
+#define _USE_MATH_DEFINES
+#endif
 #include <math.h>
 #include <assert.h>
 #include "smooth.h"

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -21,8 +21,6 @@
     is_root, mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
-from yt.analysis_modules.halo_finding.halo_objects import \
-    RockstarHaloList
 from yt.utilities.exceptions import YTRockstarMultiMassNotSupported
 
 from . import rockstar_interface
@@ -366,10 +364,3 @@
             self.runner.run(self.handler, self.workgroup)
         self.comm.barrier()
         self.pool.free_all()
-    
-    def halo_list(self,file_name='out_0.list'):
-        """
-        Reads in the out_0.list file and generates RockstarHaloList
-        and RockstarHalo objects.
-        """
-        return RockstarHaloList(self.ts[0], self.outbase+'/%s'%file_name)

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/analysis_modules/ppv_cube/ppv_utils.pyx
--- a/yt/analysis_modules/ppv_cube/ppv_utils.pyx
+++ b/yt/analysis_modules/ppv_cube/ppv_utils.pyx
@@ -2,11 +2,7 @@
 cimport numpy as np
 cimport cython
 from yt.utilities.physical_constants import kboltz
-
-cdef extern from "math.h":
-    double exp(double x) nogil
-    double fabs(double x) nogil
-    double sqrt(double x) nogil
+from libc.math cimport exp, fabs, sqrt
 
 cdef double kb = kboltz.v
 cdef double pi = np.pi

diff -r df26665494274c1aec0aa42fb3a275738b2f4e47 -r f7ce756b14d365a5535dce20be665350fe9ca98e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1432,6 +1432,9 @@
             center = self.ds.arr(center, 'code_length')
         if iterable(width):
             w, u = width
+            if isinstance(w, tuple) and isinstance(u, tuple):
+                height = u
+                w, u = w
             width = self.ds.quan(w, input_units = u)
         elif not isinstance(width, YTArray):
             width = self.ds.quan(width, 'code_length')

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5013f317a793/
Changeset:   5013f317a793
Branch:      yt
User:        jzuhone
Date:        2016-04-23 12:02:46+00:00
Summary:     Updating athena++ frontend for new dataset design.
Affected #:  3 files

diff -r f7ce756b14d365a5535dce20be665350fe9ca98e -r 5013f317a793834d85af677ce78e704c812ff383 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -35,8 +35,6 @@
 
 from .fields import AthenaPPFieldInfo
 
-geometry_map = {"3DRectMesh": "cartesian"}
-
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0
 
@@ -75,45 +73,31 @@
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
-        self.index_filename = self.dataset.datafile
+        self.index_filename = self.dataset.filename
         self._handle = ds._handle
         GridIndex.__init__(self, ds, dataset_type)
 
     def _detect_output_fields(self):
-        self._field_map = dict((("athena++", k), v) for k, v in self.dataset._field_map.items())
-        self.field_list = list(self._field_map.keys())
+        self.field_list = [("athena++", k) for k in self.dataset._field_map]
 
     def _count_grids(self):
-        self.num_grids = self._handle.attrs["TotalMeshBlock"][0]
+        self.num_grids = self._handle.attrs["NumMeshBlocks"]
 
     def _parse_index(self):
-        coord_fields = self.dataset._coord_fields
-        num_grids = self._handle.attrs["TotalMeshBlock"][0]
+        num_grids = self._handle.attrs["NumMeshBlocks"]
 
         self.grid_left_edge = np.zeros((num_grids, 3), dtype='float64')
         self.grid_right_edge = np.zeros((num_grids, 3), dtype='float64')
         self.grid_dimensions = np.zeros((num_grids, 3), dtype='int32')
-        levels = np.zeros(num_grids, dtype='int32')
 
         for i in range(num_grids):
-            x = self._handle["MeshBlock%d" % i][coord_fields[0]]
-            y = self._handle["MeshBlock%d" % i][coord_fields[1]]
-            z = self._handle["MeshBlock%d" % i][coord_fields[2]]
+            x = self._handle["x1f"][i,:]
+            y = self._handle["x2f"][i,:]
+            z = self._handle["x3f"][i,:]
             self.grid_left_edge[i] = np.array([x[0], y[0], z[0]], dtype='float64')
             self.grid_right_edge[i] = np.array([x[-1], y[-1], z[-1]], dtype='float64')
             self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
-            levels[i] = self._handle["MeshBlock%d" % i].attrs["Level"][0]
-
-        new_dle = np.min(self.grid_left_edge, axis=0)
-        new_dre = np.max(self.grid_right_edge, axis=0)
-        self.dataset.domain_left_edge[:] = np.round(new_dle, decimals=12)[:]
-        self.dataset.domain_right_edge[:] = np.round(new_dre, decimals=12)[:]
-        self.dataset.domain_width = \
-                (self.dataset.domain_right_edge -
-                 self.dataset.domain_left_edge)
-        self.dataset.domain_center = \
-                0.5*(self.dataset.domain_left_edge +
-                     self.dataset.domain_right_edge)
+        levels = self._handle["Levels"][:]
 
         self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
         self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
@@ -133,7 +117,7 @@
             g._prepare_grid()
             g._setup_dx()
         self._reconstruct_parent_child()
-        self.max_level = self._handle.attrs["MaxLevel"][0]
+        self.max_level = self._handle.attrs["MaxLevel"]
 
     def _reconstruct_parent_child(self):
         mask = np.empty(len(self.grids), dtype='int32')
@@ -177,6 +161,7 @@
         self.specified_parameters = parameters
         if units_override is None:
             units_override = {}
+        self._handle = HDF5FileHandler(filename)
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.filename = filename
@@ -184,10 +169,6 @@
             storage_filename = '%s.yt' % filename.split('/')[-1]
         self.storage_filename = storage_filename
         self.backup_filename = self.filename[:-4] + "_backup.gdf"
-        # Unfortunately we now have to mandate that the index gets
-        # instantiated so that we can make sure we have the correct left
-        # and right domain edges.
-        self.index
 
     def _set_code_unit_attributes(self):
         """
@@ -214,38 +195,33 @@
         self.unit_registry.modify("code_velocity", self.velocity_unit)
 
     def _parse_parameter_file(self):
-        from lxml import etree
-        tree = etree.parse(self.parameter_filename)
-        root = tree.getroot()
-        grids = root.findall("./Domain/Grid")[0]
-        grid0 = grids[0]
-        geometry = grid0.find("Topology").attrib["TopologyType"]
-        coords = grid0.find("Geometry")
-        self._coord_fields = [coord.text.split("/")[-1] for coord in coords]
-        self._field_map = {}
-        for field in grid0.findall("Attribute"):
-            self._field_map[field.attrib["Name"]] = field[0].text.split("/")[-1]
-        self.datafile = coords[0].text.split(":")[0]
-        self._handle = HDF5FileHandler(self.datafile)
 
-        xmin = self._handle["MeshBlock0"][self._coord_fields[0]][0]
-        ymin = self._handle["MeshBlock0"][self._coord_fields[1]][0]
-        zmin = self._handle["MeshBlock0"][self._coord_fields[2]][0]
-        mylog.info("Temporarily setting a likely bogus domain_right_edge and "
-                   "domain_left_edge. This will be corrected later.")
+        xmin, xmax = self._handle.attrs["RootGridX1"][:2]
+        ymin, ymax = self._handle.attrs["RootGridX2"][:2]
+        zmin, zmax = self._handle.attrs["RootGridX3"][:2]
+
         self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
-        self.domain_right_edge = -self.domain_left_edge
+        self.domain_right_edge = np.array([xmax, ymax, zmax], dtype='float64')
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self._handle.attrs["RootGridSize"]
 
+        self._field_map = {}
+        k = 0
+        for (i, dname), num_var in zip(enumerate(self._handle.attrs["DatasetNames"]),
+                                       self._handle.attrs["NumVariables"]):
+            for j in range(num_var):
+                fname = self._handle.attrs["VariableNames"][k].decode("ascii","ignore")
+                self._field_map[fname] = (dname.decode("ascii","ignore"), j)
+                k += 1
+
         self.refine_by = 2
         dimensionality = 3
-        if self.domain_dimensions[2] == 1 :
+        if self.domain_dimensions[2] == 1:
             dimensionality = 2
-        if self.domain_dimensions[1] == 1 :
+        if self.domain_dimensions[1] == 1:
             dimensionality = 1
         self.dimensionality = dimensionality
-        self.current_time = self._handle.attrs["Time"][0]
+        self.current_time = self._handle.attrs["Time"]
         self.unique_identifier = self.parameter_filename.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
@@ -268,12 +244,12 @@
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
             self.parameters["Gamma"] = 5./3.
-        self.geometry = geometry_map.get(geometry, "cartesian")
+        self.geometry = self._handle.attrs["Coordinates"].decode('utf-8')
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            if 'athdf.xdmf' in args[0]:
+            if args[0].endswith('athdf'):
                 return True
         except:
             pass

diff -r f7ce756b14d365a5535dce20be665350fe9ca98e -r 5013f317a793834d85af677ce78e704c812ff383 yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -25,14 +25,14 @@
 
 class AthenaPPFieldInfo(FieldInfoContainer):
     known_other_fields = (
-        ("gas_density", (rho_units, ["density"], None)),
-        ("gas_pressure", (pres_units, ["pressure"], None)),
-        ("gas_velocity_x1", (vel_units, ["velocity_x"], None)),
-        ("gas_velocity_x2", (vel_units, ["velocity_y"], None)),
-        ("gas_velocity_x3", (vel_units, ["velocity_z"], None)),
-        ("bfield_x1", (b_units, [], None)),
-        ("bfield_x2", (b_units, [], None)),
-        ("bfield_x3", (b_units, [], None)),
+        ("rho", (rho_units, ["density"], None)),
+        ("pgas", (pres_units, ["pressure"], None)),
+        ("vel1", (vel_units, ["velocity_x"], None)),
+        ("vel2", (vel_units, ["velocity_y"], None)),
+        ("vel3", (vel_units, ["velocity_z"], None)),
+        ("B1", (b_units, [], None)),
+        ("B2", (b_units, [], None)),
+        ("B3", (b_units, [], None)),
     )
 
     def setup_fluid_fields(self):
@@ -48,6 +48,6 @@
         self.add_field(("gas","temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
-        setup_magnetic_field_aliases(self, "athena++", ["bfield_x%d" % ax for ax in (1,2,3)])
+        setup_magnetic_field_aliases(self, "athena++", ["B%d" % ax for ax in (1,2,3)])
 
 

diff -r f7ce756b14d365a5535dce20be665350fe9ca98e -r 5013f317a793834d85af677ce78e704c812ff383 yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -52,13 +52,17 @@
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
-            f_fname = self.ds._field_map[field[1]]
+            ftype, fname = field
+            dname, fdi = self.ds._field_map[fname]
+            ds = f["/%s" % dname]
             ind = 0
             for chunk in chunks:
                 for gs in grid_sequences(chunk.objs):
+                    start = gs[0].id - gs[0]._id_offset
+                    end = gs[-1].id - gs[-1]._id_offset + 1
+                    data = ds[fdi,start:end,:,:,:].transpose()
                     for i, g in enumerate(gs):
-                        data = f["MeshBlock%d" % g.id][f_fname][:,:,:].transpose()
-                        ind += g.select(selector, data, rv[field], ind)
+                        ind += g.select(selector, data[...,i], rv[field], ind)
         return rv
 
     def _read_chunk_data(self, chunk, fields):
@@ -69,10 +73,13 @@
         if len(fields) == 0:
             return rv
         for field in fields:
-            f_fname = self.ds._field_map[field[1]]
+            ftype, fname = field
+            dname, fdi = self.ds._field_map[fname]
+            ds = f["/%s" % dname]
             for gs in grid_sequences(chunk.objs):
+                start = gs[0].id - gs[0]._id_offset
+                end = gs[-1].id - gs[-1]._id_offset + 1
+                data = ds[fdi,start:end,:,:,:].transpose()
                 for i, g in enumerate(gs):
-                    data = f["MeshBlock%d" % g.id][f_fname][:,:,:].transpose()
-                    rv[g.id][field] = np.asarray(data, "=f8")
-        return rv
-
+                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
+        return rv
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/051d30a4a2f4/
Changeset:   051d30a4a2f4
Branch:      yt
User:        jzuhone
Date:        2016-04-23 12:03:37+00:00
Summary:     Merge
Affected #:  31 files

diff -r 5013f317a793834d85af677ce78e704c812ff383 -r 051d30a4a2f40e288663b198c95f2cc0a38e437a doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -1,394 +1,4 @@
-#
-# Hi there!  Welcome to the yt installation script.
-#
-# This script is designed to create a fully isolated Python installation
-# with the dependencies you need to run yt.
-#
-# This script is based on Conda, a distribution mechanism from Continuum
-# Analytics.  The process is as follows:
-#
-#  1. Download the appropriate Conda installation package
-#  2. Install Conda into the specified directory
-#  3. Install yt-specific dependencies
-#  4. Install yt
-#
-# There are a few options listed below, but by default, this will install
-# everything.  At the end, it will tell you what to do to use yt.
-#
-# By default this will install yt from source.
-#
-# If you experience problems, please visit the Help section at
-# http://yt-project.org.
-#
-DEST_SUFFIX="yt-conda"
-DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
-INST_UNSTRUCTURED=1 # Do we want to build with unstructured mesh support?
-
-##################################################################
-#                                                                #
-# You will likely not have to modify anything below this region. #
-#                                                                #
-##################################################################
-
-LOG_FILE="`pwd`/yt_install.log"
-
-# Here is the idiom for redirecting to the log file:
-# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
-MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="latest"
-YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-  if [ $INST_YT_SOURCE -eq 0 ]
-  then
-      echo "yt must be compiled from source to use the unstructured mesh support."
-      echo "Please set INST_YT_SOURCE to 1 and re-run."
-      exit 1
-  fi
-  if [ `uname` = "Darwin" ]
-  then
-      EMBREE="embree-2.8.0.x86_64.macosx"
-      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
-  else
-      EMBREE="embree-2.8.0.x86_64.linux"
-      EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
-  fi
-  PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
-fi
-
-function do_exit
-{
-    echo "********************************************"
-    echo "        FAILURE REPORT:"
-    echo "********************************************"
-    echo
-    tail -n 10 ${LOG_FILE}
-    echo
-    echo "********************************************"
-    echo "********************************************"
-    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
-    exit 1
-}
-
-function log_cmd
-{
-    echo "EXECUTING:" >> ${LOG_FILE}
-    echo "  $*" >> ${LOG_FILE}
-    ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-# These are needed to prevent pushd and popd from printing to stdout
-
-function pushd () {
-    command pushd "$@" > /dev/null
-}
-
-function popd () {
-    command popd "$@" > /dev/null
-}
-
-function get_ytdata
-{
-    echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
-    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-function get_ytrecipe {
-    RDIR=${DEST_DIR}/src/yt-recipes/$1
-    mkdir -p ${RDIR}
-    pushd ${RDIR}
-    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
-    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
-    NEW_PKG=`conda build --output ${RDIR}`
-    log_cmd conda build --no-binstar-upload ${RDIR}
-    log_cmd conda install ${NEW_PKG}
-    popd
-}
-
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "Hi there!  This is the yt installation script.  We're going to download"
-echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for yt to run within."
-echo
-echo "This will install Miniconda from Continuum Analytics, the necessary"
-echo "packages to run yt, and create a self-contained environment for you to"
-echo "use yt.  Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes using the"
-echo "'conda install' command."
-echo
-MYOS=`uname -s`       # A guess at the OS
-if [ $INST_YT_SOURCE -ne 0 ]
-then
-    if [ "${MYOS##Darwin}" != "${MYOS}" ]
-    then
-        echo "Looks like you're running on Mac OSX."
-        echo
-        echo "NOTE: you must have the Xcode command line tools installed."
-        echo
-        echo "The instructions for obtaining these tools varies according"
-        echo "to your exact OS version.  On older versions of OS X, you"
-        echo "must register for an account on the apple developer tools"
-        echo "website: https://developer.apple.com/downloads to obtain the"
-        echo "download link."
-        echo
-        echo "We have gathered some additional instructions for each"
-        echo "version of OS X below. If you have trouble installing yt"
-        echo "after following these instructions, don't hesitate to contact"
-        echo "the yt user's e-mail list."
-        echo
-        echo "You can see which version of OSX you are running by clicking"
-        echo "'About This Mac' in the apple menu on the left hand side of"
-        echo "menu bar.  We're assuming that you've installed all operating"
-        echo "system updates; if you have an older version, we suggest"
-        echo "running software update and installing all available updates."
-        echo
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-        echo "Apple developer tools website."
-        echo
-        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-        echo "developer tools website.  You can either download the"
-        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-        echo "Software Update to update to XCode 3.2.6 or"
-        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-        echo "bundle (4.1 GB)."
-        echo
-        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-        echo "(search for Xcode)."
-        echo "Alternatively, download the Xcode command line tools from"
-        echo "the Apple developer tools website."
-        echo
-        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
-        echo "download the appropriate version of Xcode from the"
-        echo "mac app store (search for Xcode)."
-        echo
-        echo "Additionally, you will have to manually install the Xcode"
-        echo "command line tools."
-        echo
-        echo "For OS X 10.8, see:"
-        echo "http://stackoverflow.com/questions/9353444"
-        echo
-        echo "For OS X 10.9 and newer the command line tools can be installed"
-        echo "with the following command:"
-        echo "    xcode-select --install"
-    fi
-    if [ "${MYOS##Linux}" != "${MYOS}" ]
-    then
-        echo "Looks like you're on Linux."
-        echo
-        echo "Please make sure you have the developer tools for your OS "
-        echo "installed."
-        echo
-        if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
-        then
-            echo "Looks like you're on an OpenSUSE-compatible machine."
-            echo
-            echo "You need to have these packages installed:"
-            echo
-            echo "  * devel_C_C++"
-            echo "  * libuuid-devel"
-            echo "  * gcc-c++"
-            echo "  * chrpath"
-            echo
-            echo "You can accomplish this by executing:"
-            echo
-            echo "$ sudo zypper install -t pattern devel_C_C++"
-            echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
-            echo "$ sudo zypper install chrpath"
-        fi
-        if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
-        then
-            echo "Looks like you're on an Ubuntu-compatible machine."
-            echo
-            echo "You need to have these packages installed:"
-            echo
-            echo "  * libssl-dev"
-            echo "  * build-essential"
-            echo "  * libncurses5"
-            echo "  * libncurses5-dev"
-            echo "  * uuid-dev"
-            echo "  * chrpath"
-            echo
-            echo "You can accomplish this by executing:"
-            echo
-            echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
-            echo
-        fi
-        echo
-        echo "If you are running on a supercomputer or other module-enabled"
-        echo "system, please make sure that the GNU module has been loaded."
-        echo
-    fi
-fi
-if [ "${MYOS##x86_64}" != "${MYOS}" ]
-then
-    MINICONDA_OS="Linux-x86_64"
-elif [ "${MYOS##i386}" != "${MYOS}" ]
-then
-    MINICONDA_OS="Linux-x86"
-elif [ "${MYOS##Darwin}" != "${MYOS}" ]
-then
-     MINICONDA_OS="MacOSX-x86_64"
-else
-    echo "Not sure which Linux distro you are running."
-    echo "Going with x86_64 architecture."
-    MINICONDA_OS="Linux-x86_64"
-fi
-echo
-echo "If you'd rather not continue, hit Ctrl-C."
-echo
-echo "========================================================================"
-echo
-read -p "[hit enter] "
-echo
-echo "Awesome!  Here we go."
-echo
-
-MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
-
-if type -P wget &>/dev/null
-then
-    echo "Using wget"
-    export GETFILE="wget -nv -nc"
-else
-    echo "Using curl"
-    export GETFILE="curl -sSO"
-fi
-
-echo
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
-echo
-
-${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
-
-echo "Installing the Miniconda python environment."
-
-log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
-
-# This we *do* need.
-export PATH=${DEST_DIR}/bin:$PATH
-
-echo "Installing the necessary packages for yt."
-echo "This may take a while, but don't worry.  yt loves you."
-
-declare -a YT_DEPS
-YT_DEPS+=('python')
-YT_DEPS+=('setuptools')
-YT_DEPS+=('numpy')
-YT_DEPS+=('jupyter')
-YT_DEPS+=('ipython')
-YT_DEPS+=('sphinx')
-YT_DEPS+=('h5py')
-YT_DEPS+=('matplotlib')
-YT_DEPS+=('cython')
-YT_DEPS+=('nose')
-YT_DEPS+=('conda-build')
-YT_DEPS+=('mercurial')
-YT_DEPS+=('sympy')
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-  YT_DEPS+=('netcdf4')
-fi
-
-# Here is our dependency list for yt
-log_cmd conda update --yes conda
-
-log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-for YT_DEP in "${YT_DEPS[@]}"; do
-    echo "Installing $YT_DEP"
-    log_cmd conda install --yes ${YT_DEP}
-done
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-
-  echo "Installing embree"
-  mkdir ${DEST_DIR}/src
-  cd ${DEST_DIR}/src
-  ( ${GETFILE} "$EMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
-  log_cmd tar xfz ${EMBREE}.tar.gz
-  log_cmd mv ${DEST_DIR}/src/${EMBREE}/include/embree2 ${DEST_DIR}/include
-  log_cmd mv ${DEST_DIR}/src/${EMBREE}/lib/lib*.* ${DEST_DIR}/lib
-  if [ `uname` = "Darwin" ]
-  then
-    ln -s ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.dylib
-    install_name_tool -id ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.2.dylib
-  else
-    ln -s ${DEST_DIR}/lib/libembree.so.2 ${DEST_DIR}/lib/libembree.so
-  fi
-
-  echo "Installing pyembree from source"
-  ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
-  log_cmd unzip ${DEST_DIR}/src/master.zip
-  pushd ${DEST_DIR}/src/pyembree-master
-  log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
-  popd
-fi
-
-if [ $INST_YT_SOURCE -eq 0 ]
-then
-  echo "Installing yt"
-  log_cmd conda install --yes yt
-else
-    # We do a source install.
-    echo "Installing yt from source"
-    YT_DIR="${DEST_DIR}/src/yt-hg"
-    log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-    echo $DEST_DIR > ${YT_DIR}/embree.cfg
-fi
-    pushd ${YT_DIR}
-    log_cmd python setup.py develop
-    popd
-fi
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "yt and the Conda system are now installed in $DEST_DIR ."
-echo
-echo "You must now modify your PATH variable by prepending:"
-echo
-echo "   $DEST_DIR/bin"
-echo
-echo "On Bash-style shells you can copy/paste the following command to "
-echo "temporarily activate the yt installation:"
-echo
-echo "    export PATH=$DEST_DIR/bin:\$PATH"
-echo
-echo "and on csh-style shells:"
-echo
-echo "    setenv PATH $DEST_DIR/bin:\$PATH"
-echo
-echo "You can also update the init file appropriate for your shell to include"
-echo "the same command."
-echo
-echo "To get started with yt, check out the orientation:"
-echo
-echo "    http://yt-project.org/doc/orientation/"
-echo
-echo "For support, see the website and join the mailing list:"
-echo
-echo "    http://yt-project.org/"
-echo "    http://yt-project.org/data/      (Sample data)"
-echo "    http://yt-project.org/doc/       (Docs)"
-echo
-echo "    http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
-echo
-echo "========================================================================"
-echo
-echo "Oh, look at me, still talking when there's science to do!"
-echo "Good luck, and email the user list if you run into any problems."
+echo "This script has been deprecated."
+echo "You can now create a conda-based build using install_script.sh"
+echo "Please download that script and run it"
+exit 0

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/e9fb5e309540/
Changeset:   e9fb5e309540
Branch:      yt
User:        jzuhone
Date:        2016-04-23 22:16:02+00:00
Summary:     Code units will be the default for Athena++.
Affected #:  1 file

diff -r 051d30a4a2f40e288663b198c95f2cc0a38e437a -r e9fb5e309540a2d5bfdda25124a932f7f124ed3a yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -154,7 +154,7 @@
 
     def __init__(self, filename, dataset_type='athena++',
                  storage_filename=None, parameters=None,
-                 units_override=None, unit_system="cgs"):
+                 units_override=None, unit_system="code"):
         self.fluid_types += ("athena++",)
         if parameters is None:
             parameters = {}
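
With code units as the new default, a user who wants physical units has to ask for them at load time; yt.load forwards extra keywords to the dataset constructor shown above. A short example (the file name is a placeholder):

import yt

# Defaults to code units after this change.
ds_code = yt.load("disk.out1.00010.athdf")

# Request cgs explicitly to get dimensional fields instead.
ds_cgs = yt.load("disk.out1.00010.athdf", unit_system="cgs")
print(ds_cgs.all_data()["gas", "density"].units)   # g/cm**3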


https://bitbucket.org/yt_analysis/yt/commits/aca0408e4d2e/
Changeset:   aca0408e4d2e
Branch:      yt
User:        jzuhone
Date:        2016-04-25 19:34:10+00:00
Summary:     Mapping of Athena++'s geometry names to ours
Affected #:  1 file

diff -r e9fb5e309540a2d5bfdda25124a932f7f124ed3a -r aca0408e4d2ef912538da4a3637928b26d443689 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -35,6 +35,9 @@
 
 from .fields import AthenaPPFieldInfo
 
+geom_map = {"cartesian": "cartesian",
+            "spherical_polar": "spherical"}
+
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0
 
@@ -244,7 +247,7 @@
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
             self.parameters["Gamma"] = 5./3.
-        self.geometry = self._handle.attrs["Coordinates"].decode('utf-8')
+        self.geometry = geom_map[self._handle.attrs["Coordinates"].decode('utf-8')]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):


https://bitbucket.org/yt_analysis/yt/commits/f57d4c98a815/
Changeset:   f57d4c98a815
Branch:      yt
User:        jzuhone
Date:        2016-04-30 18:50:35+00:00
Summary:     Make velocity fields also work when conserved variables are in the file.
Affected #:  1 file

diff -r aca0408e4d2ef912538da4a3637928b26d443689 -r f57d4c98a815ded9c982a9acf277e377519e60b3 yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -23,13 +23,16 @@
 rho_units = "code_mass / code_length**3"
 vel_units = "code_length / code_time"
 
+def velocity_field(comp):
+    def _velocity(field, data):
+        return data["athena++", "mom%d" % comp]/data["athena++","dens"]
+    return _velocity
+
 class AthenaPPFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("rho", (rho_units, ["density"], None)),
+        ("dens", (rho_units, ["density"], None)),
         ("pgas", (pres_units, ["pressure"], None)),
-        ("vel1", (vel_units, ["velocity_x"], None)),
-        ("vel2", (vel_units, ["velocity_y"], None)),
-        ("vel3", (vel_units, ["velocity_z"], None)),
         ("B1", (b_units, [], None)),
         ("B2", (b_units, [], None)),
         ("B3", (b_units, [], None)),
@@ -39,6 +42,22 @@
         from yt.fields.magnetic_field import \
             setup_magnetic_field_aliases
         unit_system = self.ds.unit_system
+        # Add velocity fields
+        for i, comp in enumerate(self.ds.coordinates.axis_order):
+            vel_field = ("athena++", "vel%d" % (i+1))
+            mom_field = ("athena++", "mom%d" % (i+1))
+            if vel_field in self.field_list:
+                self.add_output_field(vel_field, units="code_length/code_time")
+                self.alias(("gas","velocity_%s" % comp), vel_field,
+                           units=unit_system["velocity"])
+            elif mom_field in self.field_list:
+                print("happy happy joy joy", mom_field)
+                self.add_output_field(mom_field,
+                                      units="code_mass/code_time/code_length**2")
+                print(comp)
+                self.add_field(("gas","velocity_%s" % comp),
+                               function=velocity_field(i+1), units=unit_system["velocity"])
+        # Add temperature field
         def _temperature(field, data):
             if data.has_field_parameter("mu"):
                 mu = data.get_field_parameter("mu")
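
The velocity_field factory exists so that each component gets its own closure over the momentum index; defining the inner function directly inside the loop would late-bind the loop variable, a standard Python pitfall. A standalone sketch of the same pattern, with a plain dict standing in for yt data objects (names and numbers are illustrative only):

def velocity_from_momentum(comp):
    # comp is bound per call to the factory, so every returned function
    # keeps its own component index even though they are built in a loop.
    def _velocity(data):
        return data["mom%d" % comp] / data["dens"]
    return _velocity

# Hypothetical usage with plain numbers in place of yt field arrays:
data = {"dens": 2.0, "mom1": 4.0, "mom2": 6.0, "mom3": 8.0}
components = [velocity_from_momentum(i + 1) for i in range(3)]
print([f(data) for f in components])   # [2.0, 3.0, 4.0]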


https://bitbucket.org/yt_analysis/yt/commits/84019205440f/
Changeset:   84019205440f
Branch:      yt
User:        jzuhone
Date:        2016-04-30 19:02:51+00:00
Summary:     Adding in other coordinate systems
Affected #:  1 file

diff -r f57d4c98a815ded9c982a9acf277e377519e60b3 -r 84019205440fd281249f923fd7c3e4e0003f108d yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -36,7 +36,13 @@
 from .fields import AthenaPPFieldInfo
 
 geom_map = {"cartesian": "cartesian",
-            "spherical_polar": "spherical"}
+            "cylindrical": "cylindrical",
+            "spherical_polar": "spherical",
+            "minkowski": "cartesian",
+            "tilted": "cartesian",
+            "sinusoidal": "cartesian",
+            "schwarzschild": "spherical",
+            "kerr-schild": "spherical"}
 
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0


https://bitbucket.org/yt_analysis/yt/commits/d97dfcbed015/
Changeset:   d97dfcbed015
Branch:      yt
User:        jzuhone
Date:        2016-04-30 19:15:06+00:00
Summary:     Map velocity fields to correct components in correct geometries
Affected #:  1 file

diff -r 84019205440fd281249f923fd7c3e4e0003f108d -r d97dfcbed015e4d6c720ae65f738630b3b624dbd yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -23,9 +23,9 @@
 rho_units = "code_mass / code_length**3"
 vel_units = "code_length / code_time"
 
-def velocity_field(comp):
+def velocity_field(j):
     def _velocity(field, data):
-        return data["athena++", "mom%d" % comp]/data["athena++","dens"]
+        return data["athena++", "mom%d" % j]/data["athena++","dens"]
     return _velocity
 
 class AthenaPPFieldInfo(FieldInfoContainer):
@@ -43,19 +43,23 @@
             setup_magnetic_field_aliases
         unit_system = self.ds.unit_system
         # Add velocity fields
+        vel_prefix = "velocity"
+        if self.ds.geometry != "cartesian":
+            vel_prefix += "_"+self.ds.geometry
         for i, comp in enumerate(self.ds.coordinates.axis_order):
             vel_field = ("athena++", "vel%d" % (i+1))
             mom_field = ("athena++", "mom%d" % (i+1))
+            if comp == "r":
+                comp = "radius"
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
-                self.alias(("gas","velocity_%s" % comp), vel_field,
+                self.alias(("gas","%s_%s" % (vel_prefix, comp)), vel_field,
                            units=unit_system["velocity"])
             elif mom_field in self.field_list:
-                print("happy happy joy joy", mom_field)
                 self.add_output_field(mom_field,
                                       units="code_mass/code_time/code_length**2")
                 print(comp)
-                self.add_field(("gas","velocity_%s" % comp),
+                self.add_field(("gas","%s_%s" % (vel_prefix, comp)),
                                function=velocity_field(i+1), units=unit_system["velocity"])
         # Add temperature field
         def _temperature(field, data):
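
Concretely, the derived field names now pick up the geometry: Cartesian data keeps ("gas", "velocity_x") and friends, while spherical data gets ("gas", "velocity_spherical_radius"), ("gas", "velocity_spherical_theta"), and ("gas", "velocity_spherical_phi"). A tiny sketch of just the name construction (the axis order shown is yt's spherical ordering):

def velocity_field_names(geometry, axis_order):
    # Mirrors the prefix and "r" -> "radius" handling in the patch above.
    prefix = "velocity"
    if geometry != "cartesian":
        prefix += "_" + geometry
    names = []
    for comp in axis_order:
        if comp == "r":
            comp = "radius"
        names.append(("gas", "%s_%s" % (prefix, comp)))
    return names

print(velocity_field_names("spherical", ("r", "theta", "phi")))
# [('gas', 'velocity_spherical_radius'), ('gas', 'velocity_spherical_theta'),
#  ('gas', 'velocity_spherical_phi')]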


https://bitbucket.org/yt_analysis/yt/commits/219afd4a7bde/
Changeset:   219afd4a7bde
Branch:      yt
User:        jzuhone
Date:        2016-04-30 19:21:41+00:00
Summary:     Add in total energy density field with correct units
Affected #:  1 file

diff -r d97dfcbed015e4d6c720ae65f738630b3b624dbd -r 219afd4a7bde1ce1b356468566df750ae3b826cc yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -33,6 +33,7 @@
         ("rho", (rho_units, ["density"], None)),
         ("dens", (rho_units, ["density"], None)),
         ("pgas", (pres_units, ["pressure"], None)),
+        ("Etot", (pres_units, [], None)),
         ("B1", (b_units, [], None)),
         ("B2", (b_units, [], None)),
         ("B3", (b_units, [], None)),


https://bitbucket.org/yt_analysis/yt/commits/ccf4e37c19e1/
Changeset:   ccf4e37c19e1
Branch:      yt
User:        jzuhone
Date:        2016-04-30 19:59:42+00:00
Summary:     Handling pressure and thermal energy correctly
Affected #:  1 file

diff -r 219afd4a7bde1ce1b356468566df750ae3b826cc -r ccf4e37c19e12fc9b4103a630da53c0c4008ef32 yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -32,8 +32,6 @@
     known_other_fields = (
         ("rho", (rho_units, ["density"], None)),
         ("dens", (rho_units, ["density"], None)),
-        ("pgas", (pres_units, ["pressure"], None)),
-        ("Etot", (pres_units, [], None)),
         ("B1", (b_units, [], None)),
         ("B2", (b_units, [], None)),
         ("B3", (b_units, [], None)),
@@ -62,6 +60,26 @@
                 print(comp)
                 self.add_field(("gas","%s_%s" % (vel_prefix, comp)),
                                function=velocity_field(i+1), units=unit_system["velocity"])
+        # Figure out thermal energy field
+        if ("athena++","pgas") in self.field_list:
+            self.add_output_field(("athena++","pgas"),
+                                  units=pres_units)
+            self.alias(("gas","pressure"),("athena++","pgas"),
+                       units=unit_system["pressure"])
+            def _thermal_energy(field, data):
+                return data["athena++","pgas"] / \
+                       (data.ds.gamma-1.)/data["athena++","rho"]
+        elif ("athena++","Etot") in self.field_list:
+            self.add_output_field(("athena++","Etot"),
+                                  units=pres_units)
+            def _thermal_energy(field, data):
+                eint = data["athena++", "Etot"] - data["gas","kinetic_energy"]
+                if ("athena++", "B1") in self.field_list:
+                    eint -= data["gas","magnetic_energy"]
+                return eint/data["athena++","dens"]
+        self.add_field(("gas","thermal_energy"),
+                       function=_thermal_energy,
+                       units=unit_system["specific_energy"])
         # Add temperature field
         def _temperature(field, data):
             if data.has_field_parameter("mu"):


https://bitbucket.org/yt_analysis/yt/commits/068cf17c421d/
Changeset:   068cf17c421d
Branch:      yt
User:        jzuhone
Date:        2016-04-30 20:47:59+00:00
Summary:     Add the same fixes and simplifications to Athena
Affected #:  1 file

diff -r ccf4e37c19e12fc9b4103a630da53c0c4008ef32 -r 068cf17c421dae78cf81ca5bd71b490457b07b73 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -59,29 +59,17 @@
                 self.add_field(("gas","velocity_%s" % comp),
                                function=velocity_field(comp), units = unit_system["velocity"])
         # Add pressure, energy, and temperature fields
-        def ekin1(data):
-            return 0.5*(data["athena","momentum_x"]**2 +
-                        data["athena","momentum_y"]**2 +
-                        data["athena","momentum_z"]**2)/data["athena","density"]
-        def ekin2(data):
-            return 0.5*(data["athena","velocity_x"]**2 +
-                        data["athena","velocity_y"]**2 +
-                        data["athena","velocity_z"]**2)*data["athena","density"]
-        def emag(data):
-            return 0.5*(data["cell_centered_B_x"]**2 +
-                        data["cell_centered_B_y"]**2 +
-                        data["cell_centered_B_z"]**2)
         def eint_from_etot(data):
             eint = data["athena","total_energy"]
-            eint -= ekin1(data)
+            eint -= data["gas","kinetic_energy"]
             if ("athena","cell_centered_B_x") in self.field_list:
-                eint -= emag(data)
+                eint -= data["gas","magnetic_energy"]
             return eint
         def etot_from_pres(data):
             etot = data["athena","pressure"]/(data.ds.gamma-1.)
-            etot += ekin2(data)
+            etot += data["gas", "kinetic_energy"]
             if ("athena","cell_centered_B_x") in self.field_list:
-                etot += emag(data)
+                etot += data["gas", "magnetic_energy"]
             return etot
         if ("athena","pressure") in self.field_list:
             self.add_output_field(("athena","pressure"),
@@ -102,10 +90,6 @@
         elif ("athena","total_energy") in self.field_list:
             self.add_output_field(("athena","total_energy"),
                                   units=pres_units)
-            def _pressure(field, data):
-                return eint_from_etot(data)*(data.ds.gamma-1.0)
-            self.add_field(("gas","pressure"), function=_pressure,
-                           units=unit_system["pressure"])
             def _thermal_energy(field, data):
                 return eint_from_etot(data)/data["athena","density"]
             self.add_field(("gas","thermal_energy"),
@@ -116,7 +100,7 @@
             self.add_field(("gas","total_energy"),
                            function=_total_energy,
                            units=unit_system["specific_energy"])
-
+        # Add temperature field
         def _temperature(field, data):
             if data.has_field_parameter("mu"):
                 mu = data.get_field_parameter("mu")
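
The simplification leans on yt's own energy-density fields in place of the local helpers: roughly, ("gas", "kinetic_energy") is rho*v**2/2 and ("gas", "magnetic_energy") is the magnetic energy density, B**2/(8*pi) in cgs form. An array-level, illustrative stand-in for those quantities (not the frontend's code; cgs form assumed for the magnetic term):

import numpy as np

def kinetic_energy_density(rho, vx, vy, vz):
    return 0.5 * rho * (vx**2 + vy**2 + vz**2)

def magnetic_energy_density(bx, by, bz):
    return (bx**2 + by**2 + bz**2) / (8.0 * np.pi)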


https://bitbucket.org/yt_analysis/yt/commits/c470e39a1be4/
Changeset:   c470e39a1be4
Branch:      yt
User:        jzuhone
Date:        2016-04-30 20:49:04+00:00
Summary:     Get rid of print
Affected #:  1 file

diff -r 068cf17c421dae78cf81ca5bd71b490457b07b73 -r c470e39a1be486b6b8e77204a381d9a161f3bf5e yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -57,7 +57,6 @@
             elif mom_field in self.field_list:
                 self.add_output_field(mom_field,
                                       units="code_mass/code_time/code_length**2")
-                print(comp)
                 self.add_field(("gas","%s_%s" % (vel_prefix, comp)),
                                function=velocity_field(i+1), units=unit_system["velocity"])
         # Figure out thermal energy field


https://bitbucket.org/yt_analysis/yt/commits/8eabdf8ef83d/
Changeset:   8eabdf8ef83d
Branch:      yt
User:        MatthewTurk
Date:        2016-04-07 20:52:42+00:00
Summary:     Begin adding support for log spherical coordinates.
Affected #:  4 files

diff -r c7470f10bdc0a7851036a0baa30d88f1d65e4c9c -r 8eabdf8ef83de696007ee8834f5b6677cb729213 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -69,6 +69,7 @@
     PolarCoordinateHandler, \
     CylindricalCoordinateHandler, \
     SphericalCoordinateHandler, \
+    LogSphericalCoordinateHandler, \
     GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
@@ -486,6 +487,8 @@
             cls = PolarCoordinateHandler
         elif self.geometry == "spherical":
             cls = SphericalCoordinateHandler
+        elif self.geometry == "logspherical":
+            cls = LogSphericalCoordinateHandler
         elif self.geometry == "geographic":
             cls = GeographicCoordinateHandler
         elif self.geometry == "spectral_cube":

diff -r c7470f10bdc0a7851036a0baa30d88f1d65e4c9c -r 8eabdf8ef83de696007ee8834f5b6677cb729213 yt/geometry/coordinates/api.py
--- a/yt/geometry/coordinates/api.py
+++ b/yt/geometry/coordinates/api.py
@@ -21,7 +21,8 @@
 from .cylindrical_coordinates import \
     CylindricalCoordinateHandler
 from .spherical_coordinates import \
-    SphericalCoordinateHandler
+    SphericalCoordinateHandler, \
+    LogSphericalCoordinateHandler
 from .geographic_coordinates import \
     GeographicCoordinateHandler
 from .spec_cube_coordinates import \

diff -r c7470f10bdc0a7851036a0baa30d88f1d65e4c9c -r 8eabdf8ef83de696007ee8834f5b6677cb729213 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -252,3 +252,247 @@
         print("Actual volume is                   %0.16e" % (v2))
         print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
 
+class LogSphericalCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, ds, ordering = ('logr', 'theta', 'phi')):
+        super(LogSphericalCoordinateHandler, self).__init__(ds, ordering)
+        # Generate 
+        self.image_units = {}
+        self.image_units[self.axis_id['logr']] = ("rad", "rad")
+        self.image_units[self.axis_id['theta']] = (None, None)
+        self.image_units[self.axis_id['phi']] = (None, None)
+
+    def setup_fields(self, registry):
+        # return the fields for r, z, theta
+        registry.add_field(("index", "dx"), function=_unknown_coord)
+        registry.add_field(("index", "dy"), function=_unknown_coord)
+        registry.add_field(("index", "dz"), function=_unknown_coord)
+        registry.add_field(("index", "x"), function=_unknown_coord)
+        registry.add_field(("index", "y"), function=_unknown_coord)
+        registry.add_field(("index", "z"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(self.axis_id['logr'])
+        registry.add_field(("index", "dlogr"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "logr"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(self.axis_id['theta'], "")
+        registry.add_field(("index", "dtheta"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "theta"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(self.axis_id['phi'], "")
+        registry.add_field(("index", "dphi"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "phi"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        def _logr_to_r(field, data):
+            return data.apply_units(10**data["logr"], "code_length")
+        registry.add_field(("index", "r"),
+                 function = _logr_to_r,
+                 units = "code_length")
+
+        def _dlogr_to_dr(field, data):
+            LE = 10**(data["logr"] - data["dlogr"]/2.0)
+            RE = 10**(data["logr"] + data["dlogr"]/2.0)
+            return data.apply_units(RE - LE, "code_length")
+        registry.add_field(("index", "dr"),
+                 function = _dlogr_to_dr,
+                 units = "code_length")
+
+        def _SphericalVolume(field, data):
+            # r**2 sin theta dr dtheta dphi
+            vol = data["index", "r"]**2.0
+            vol *= data["index", "dr"]
+            vol *= np.sin(data["index", "theta"])
+            vol *= data["index", "dtheta"]
+            vol *= data["index", "dphi"]
+            return vol
+        registry.add_field(("index", "cell_volume"),
+                 function=_SphericalVolume,
+                 units = "code_length**3")
+
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_element_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_element_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_phi(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] \
+                    * data["index", "dphi"] \
+                    * np.sin(data["index", "theta"])
+        registry.add_field(("index", "path_element_phi"),
+                 function = _path_phi,
+                 units = "code_length")
+
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
+        self.period
+        name = self.axis_name[dimension]
+        if name == 'logr':
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
+        elif name in ('theta', 'phi'):
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                          antialias, dimension)
+        else:
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        buff = pixelize_aitoff(data_source["py"], data_source["pdy"],
+                               data_source["px"], data_source["pdx"],
+                               size, data_source[field], None,
+                               None, theta_offset = 0,
+                               phi_offset = 0).transpose()
+        return buff
+
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
+                      dimension):
+        if dimension == 1:
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
+                                     size, data_source[field], bounds)
+        elif dimension == 2:
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
+                                     size, data_source[field], bounds)
+            buff = buff.transpose()
+        else:
+            raise RuntimeError
+        return buff
+
+
+    def convert_from_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cartesian(self, coord):
+        if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
+            ri = 10**self.axis_id['logr']
+            thetai = self.axis_id['theta']
+            phii = self.axis_id['phi']
+            r = coord[:,ri]
+            theta = coord[:,thetai]
+            phi = coord[:,phii]
+            nc = np.zeros_like(coord)
+            # r, theta, phi
+            nc[:,ri] = np.cos(phi) * np.sin(theta)*r
+            nc[:,thetai] = np.sin(phi) * np.sin(theta)*r
+            nc[:,phii] = np.cos(theta) * r
+        else:
+            r, theta, phi = coord
+            nc = (np.cos(phi) * np.sin(theta)*r,
+                  np.sin(phi) * np.sin(theta)*r,
+                  np.cos(theta) * r)
+        return nc
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    _image_axis_name = None
+    @property
+    def image_axis_name(self):    
+        if self._image_axis_name is not None:
+            return self._image_axis_name
+        # This is the x and y axes labels that get displayed.  For
+        # non-Cartesian coordinates, we usually want to override these for
+        # Cartesian coordinates, since we transform them.
+        rv = {self.axis_id['logr']: ('theta', 'phi'),
+              self.axis_id['theta']: ('log_{10}(x) / \\sin(\\theta)', 'log_{10}(y) / \\sin(\\theta)'),
+              self.axis_id['phi']: ('log_{10}(R)', 'z')}
+        for i in list(rv.keys()):
+            rv[self.axis_name[i]] = rv[i]
+            rv[self.axis_name[i].capitalize()] = rv[i]
+        self._image_axis_name = rv
+        return rv
+
+    _x_pairs = (('logr', 'theta'), ('theta', 'logr'), ('phi', 'logr'))
+    _y_pairs = (('logr', 'phi'), ('theta', 'phi'), ('phi', 'theta'))
+
+    @property
+    def period(self):
+        return self.ds.domain_width
+
+    def sanitize_center(self, center, axis):
+        center, display_center = super(
+            LogSphericalCoordinateHandler, self).sanitize_center(center, axis)
+        name = self.axis_name[axis]
+        if name == 'logr':
+            display_center = center
+        elif name == 'theta':
+            display_center = (0.0 * display_center[0],
+                              0.0 * display_center[1],
+                              0.0 * display_center[2])
+        elif name == 'phi':
+            display_center = [self.ds.domain_width[0]/2.0 +
+                                self.ds.domain_left_edge[0],
+                              0.0 * display_center[1],
+                              0.0 * display_center[2]]
+            ri = self.axis_id['logr']
+            c = self.ds.domain_width[ri]/2.0 + self.ds.domain_left_edge[ri]
+            display_center[ri] = c
+            display_center = tuple(display_center)
+        return center, display_center
+
+    def sanitize_width(self, axis, width, depth):
+        name = self.axis_name[axis]
+        if width is not None:
+            width = super(LogSphericalCoordinateHandler, self).sanitize_width(
+              axis, width, depth)
+        elif name == 'logr':
+            width = [self.ds.domain_width[self.x_axis['logr']],
+                     self.ds.domain_width[self.y_axis['logr']]]
+        elif name == 'theta':
+            ri = self.axis_id['logr']
+            # Remember, in spherical coordinates when we cut in theta,
+            # we create a conic section
+            width = [2.0*self.ds.domain_width[ri],
+                     2.0*self.ds.domain_width[ri]]
+        elif name == 'phi':
+            ri = self.axis_id['logr']
+            width = [self.ds.domain_right_edge[ri],
+                     2.0*self.ds.domain_width[ri]]
+        return width
+
+    def _sanity_check(self):
+        """This prints out a handful of diagnostics that help verify the
+        dataset is well formed."""
+        # We just check a few things here.
+        dd = self.ds.all_data()
+        r0 = 10**self.ds.domain_left_edge[self.axis_id['logr']]
+        r1 = 10**self.ds.domain_right_edge[self.axis_id['logr']]
+        v1 = 4.0 * np.pi / 3.0 * (r1**3 - r0**3)
+        print("Total volume should be 4*pi*r**3 = %0.16e" % (v1))
+        v2 = dd.quantities.total_quantity("cell_volume")
+        print("Actual volume is                   %0.16e" % (v2))
+        print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
+

diff -r c7470f10bdc0a7851036a0baa30d88f1d65e4c9c -r 8eabdf8ef83de696007ee8834f5b6677cb729213 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -213,11 +213,12 @@
 
 _geom_transforms = {
     # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
-    'cartesian'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
-    'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
-    'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
-    'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
-    'geographic' : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
+    'cartesian'    : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
+    'spherical'    : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
+    'logspherical' : ( (-3.0, 0.0, 0.0), (5.0, np.pi, 2*np.pi) ),
+    'cylindrical'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
+    'polar'        : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+    'geographic'   : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
 }
 
 def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):
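
The new handler stores log10(r) as the radial coordinate and rebuilds linear-r quantities on the fly. A minimal numeric sketch of the dr and cell-volume recipes used in setup_fields above (the values are made up):

import numpy as np

logr = np.array([-1.0, 0.0, 1.0])        # cell centers in log10(r)
dlogr = np.full_like(logr, 0.5)          # cell widths in log10(r)

r = 10.0**logr
dr = 10.0**(logr + dlogr / 2.0) - 10.0**(logr - dlogr / 2.0)

theta, dtheta = np.pi / 2.0, 0.1         # illustrative angular cell
dphi = 0.1
# Same element as _SphericalVolume: r**2 sin(theta) dr dtheta dphi
cell_volume = r**2 * dr * np.sin(theta) * dtheta * dphi
print(cell_volume)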


https://bitbucket.org/yt_analysis/yt/commits/f543eb5570b5/
Changeset:   f543eb5570b5
Branch:      yt
User:        jzuhone
Date:        2016-04-30 23:30:14+00:00
Summary:     Merge
Affected #:  4 files

diff -r c470e39a1be486b6b8e77204a381d9a161f3bf5e -r f543eb5570b5ceea5106f2711be5f4c0606ea554 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -69,6 +69,7 @@
     PolarCoordinateHandler, \
     CylindricalCoordinateHandler, \
     SphericalCoordinateHandler, \
+    LogSphericalCoordinateHandler, \
     GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
@@ -486,6 +487,8 @@
             cls = PolarCoordinateHandler
         elif self.geometry == "spherical":
             cls = SphericalCoordinateHandler
+        elif self.geometry == "logspherical":
+            cls = LogSphericalCoordinateHandler
         elif self.geometry == "geographic":
             cls = GeographicCoordinateHandler
         elif self.geometry == "spectral_cube":

diff -r c470e39a1be486b6b8e77204a381d9a161f3bf5e -r f543eb5570b5ceea5106f2711be5f4c0606ea554 yt/geometry/coordinates/api.py
--- a/yt/geometry/coordinates/api.py
+++ b/yt/geometry/coordinates/api.py
@@ -21,7 +21,8 @@
 from .cylindrical_coordinates import \
     CylindricalCoordinateHandler
 from .spherical_coordinates import \
-    SphericalCoordinateHandler
+    SphericalCoordinateHandler, \
+    LogSphericalCoordinateHandler
 from .geographic_coordinates import \
     GeographicCoordinateHandler
 from .spec_cube_coordinates import \

diff -r c470e39a1be486b6b8e77204a381d9a161f3bf5e -r f543eb5570b5ceea5106f2711be5f4c0606ea554 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -253,3 +253,247 @@
         print("Actual volume is                   %0.16e" % (v2))
         print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
 
+class LogSphericalCoordinateHandler(CoordinateHandler):
+
+    def __init__(self, ds, ordering = ('logr', 'theta', 'phi')):
+        super(LogSphericalCoordinateHandler, self).__init__(ds, ordering)
+        # Generate 
+        self.image_units = {}
+        self.image_units[self.axis_id['logr']] = ("rad", "rad")
+        self.image_units[self.axis_id['theta']] = (None, None)
+        self.image_units[self.axis_id['phi']] = (None, None)
+
+    def setup_fields(self, registry):
+        # return the fields for r, z, theta
+        registry.add_field(("index", "dx"), function=_unknown_coord)
+        registry.add_field(("index", "dy"), function=_unknown_coord)
+        registry.add_field(("index", "dz"), function=_unknown_coord)
+        registry.add_field(("index", "x"), function=_unknown_coord)
+        registry.add_field(("index", "y"), function=_unknown_coord)
+        registry.add_field(("index", "z"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(self.axis_id['logr'])
+        registry.add_field(("index", "dlogr"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "logr"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(self.axis_id['theta'], "")
+        registry.add_field(("index", "dtheta"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "theta"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        f1, f2 = _get_coord_fields(self.axis_id['phi'], "")
+        registry.add_field(("index", "dphi"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "phi"), function = f2,
+                           display_field = False,
+                           units = "")
+
+        def _logr_to_r(field, data):
+            return data.apply_units(10**data["logr"], "code_length")
+        registry.add_field(("index", "r"),
+                 function = _logr_to_r,
+                 units = "code_length")
+
+        def _dlogr_to_dr(field, data):
+            LE = 10**(data["logr"] - data["dlogr"]/2.0)
+            RE = 10**(data["logr"] + data["dlogr"]/2.0)
+            return data.apply_units(RE - LE, "code_length")
+        registry.add_field(("index", "dr"),
+                 function = _dlogr_to_dr,
+                 units = "code_length")
+
+        def _SphericalVolume(field, data):
+            # r**2 sin theta dr dtheta dphi
+            vol = data["index", "r"]**2.0
+            vol *= data["index", "dr"]
+            vol *= np.sin(data["index", "theta"])
+            vol *= data["index", "dtheta"]
+            vol *= data["index", "dphi"]
+            return vol
+        registry.add_field(("index", "cell_volume"),
+                 function=_SphericalVolume,
+                 units = "code_length**3")
+
+        def _path_r(field, data):
+            return data["index", "dr"]
+        registry.add_field(("index", "path_element_r"),
+                 function = _path_r,
+                 units = "code_length")
+        def _path_theta(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] * data["index", "dtheta"]
+        registry.add_field(("index", "path_element_theta"),
+                 function = _path_theta,
+                 units = "code_length")
+        def _path_phi(field, data):
+            # Note: this already assumes cell-centered
+            return data["index", "r"] \
+                    * data["index", "dphi"] \
+                    * np.sin(data["index", "theta"])
+        registry.add_field(("index", "path_element_phi"),
+                 function = _path_phi,
+                 units = "code_length")
+
+    def pixelize(self, dimension, data_source, field, bounds, size,
+                 antialias = True, periodic = True):
+        self.period
+        name = self.axis_name[dimension]
+        if name == 'logr':
+            return self._ortho_pixelize(data_source, field, bounds, size,
+                                        antialias, dimension, periodic)
+        elif name in ('theta', 'phi'):
+            return self._cyl_pixelize(data_source, field, bounds, size,
+                                          antialias, dimension)
+        else:
+            raise NotImplementedError
+
+    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
+                        dim, periodic):
+        buff = pixelize_aitoff(data_source["py"], data_source["pdy"],
+                               data_source["px"], data_source["pdx"],
+                               size, data_source[field], None,
+                               None, theta_offset = 0,
+                               phi_offset = 0).transpose()
+        return buff
+
+
+    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
+                      dimension):
+        if dimension == 1:
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
+                                     size, data_source[field], bounds)
+        elif dimension == 2:
+            buff = pixelize_cylinder(data_source['px'],
+                                     data_source['pdx'],
+                                     data_source['py'],
+                                     data_source['pdy'],
+                                     size, data_source[field], bounds)
+            buff = buff.transpose()
+        else:
+            raise RuntimeError
+        return buff
+
+
+    def convert_from_cartesian(self, coord):
+        raise NotImplementedError
+
+    def convert_to_cartesian(self, coord):
+        if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
+            ri = 10**self.axis_id['logr']
+            thetai = self.axis_id['theta']
+            phii = self.axis_id['phi']
+            r = coord[:,ri]
+            theta = coord[:,thetai]
+            phi = coord[:,phii]
+            nc = np.zeros_like(coord)
+            # r, theta, phi
+            nc[:,ri] = np.cos(phi) * np.sin(theta)*r
+            nc[:,thetai] = np.sin(phi) * np.sin(theta)*r
+            nc[:,phii] = np.cos(theta) * r
+        else:
+            r, theta, phi = coord
+            nc = (np.cos(phi) * np.sin(theta)*r,
+                  np.sin(phi) * np.sin(theta)*r,
+                  np.cos(theta) * r)
+        return nc
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_to_spherical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_spherical(self, coord):
+        raise NotImplementedError
+
+    _image_axis_name = None
+    @property
+    def image_axis_name(self):    
+        if self._image_axis_name is not None:
+            return self._image_axis_name
+        # This is the x and y axes labels that get displayed.  For
+        # non-Cartesian coordinates, we usually want to override these for
+        # Cartesian coordinates, since we transform them.
+        rv = {self.axis_id['logr']: ('theta', 'phi'),
+              self.axis_id['theta']: ('log_{10}(x) / \\sin(\\theta)', 'log_{10}(y) / \\sin(\\theta)'),
+              self.axis_id['phi']: ('log_{10}(R)', 'z')}
+        for i in list(rv.keys()):
+            rv[self.axis_name[i]] = rv[i]
+            rv[self.axis_name[i].capitalize()] = rv[i]
+        self._image_axis_name = rv
+        return rv
+
+    _x_pairs = (('logr', 'theta'), ('theta', 'logr'), ('phi', 'logr'))
+    _y_pairs = (('logr', 'phi'), ('theta', 'phi'), ('phi', 'theta'))
+
+    @property
+    def period(self):
+        return self.ds.domain_width
+
+    def sanitize_center(self, center, axis):
+        center, display_center = super(
+            LogSphericalCoordinateHandler, self).sanitize_center(center, axis)
+        name = self.axis_name[axis]
+        if name == 'logr':
+            display_center = center
+        elif name == 'theta':
+            display_center = (0.0 * display_center[0],
+                              0.0 * display_center[1],
+                              0.0 * display_center[2])
+        elif name == 'phi':
+            display_center = [self.ds.domain_width[0]/2.0 +
+                                self.ds.domain_left_edge[0],
+                              0.0 * display_center[1],
+                              0.0 * display_center[2]]
+            ri = self.axis_id['logr']
+            c = self.ds.domain_width[ri]/2.0 + self.ds.domain_left_edge[ri]
+            display_center[ri] = c
+            display_center = tuple(display_center)
+        return center, display_center
+
+    def sanitize_width(self, axis, width, depth):
+        name = self.axis_name[axis]
+        if width is not None:
+            width = super(LogSphericalCoordinateHandler, self).sanitize_width(
+              axis, width, depth)
+        elif name == 'logr':
+            width = [self.ds.domain_width[self.x_axis['logr']],
+                     self.ds.domain_width[self.y_axis['logr']]]
+        elif name == 'theta':
+            ri = self.axis_id['logr']
+            # Remember, in spherical coordinates when we cut in theta,
+            # we create a conic section
+            width = [2.0*self.ds.domain_width[ri],
+                     2.0*self.ds.domain_width[ri]]
+        elif name == 'phi':
+            ri = self.axis_id['logr']
+            width = [self.ds.domain_right_edge[ri],
+                     2.0*self.ds.domain_width[ri]]
+        return width
+
+    def _sanity_check(self):
+        """This prints out a handful of diagnostics that help verify the
+        dataset is well formed."""
+        # We just check a few things here.
+        dd = self.ds.all_data()
+        r0 = 10**self.ds.domain_left_edge[self.axis_id['logr']]
+        r1 = 10**self.ds.domain_right_edge[self.axis_id['logr']]
+        v1 = 4.0 * np.pi / 3.0 * (r1**3 - r0**3)
+        print("Total volume should be 4*pi*r**3 = %0.16e" % (v1))
+        v2 = dd.quantities.total_quantity("cell_volume")
+        print("Actual volume is                   %0.16e" % (v2))
+        print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
+

diff -r c470e39a1be486b6b8e77204a381d9a161f3bf5e -r f543eb5570b5ceea5106f2711be5f4c0606ea554 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -213,11 +213,12 @@
 
 _geom_transforms = {
     # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
-    'cartesian'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
-    'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
-    'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
-    'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
-    'geographic' : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
+    'cartesian'    : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
+    'spherical'    : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
+    'logspherical' : ( (-3.0, 0.0, 0.0), (5.0, np.pi, 2*np.pi) ),
+    'cylindrical'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
+    'polar'        : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+    'geographic'   : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
 }
 
 def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):


https://bitbucket.org/yt_analysis/yt/commits/10dc56aed6c4/
Changeset:   10dc56aed6c4
Branch:      yt
User:        jzuhone
Date:        2016-05-01 14:43:34+00:00
Summary:     Need to specify dimensionless units here
Affected #:  1 file

diff -r f543eb5570b5ceea5106f2711be5f4c0606ea554 -r 10dc56aed6c4febed305f8254689b26b25efaccc yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -271,7 +271,7 @@
         registry.add_field(("index", "x"), function=_unknown_coord)
         registry.add_field(("index", "y"), function=_unknown_coord)
         registry.add_field(("index", "z"), function=_unknown_coord)
-        f1, f2 = _get_coord_fields(self.axis_id['logr'])
+        f1, f2 = _get_coord_fields(self.axis_id['logr'], "")
         registry.add_field(("index", "dlogr"), function = f1,
                            display_field = False,
                            units = "")


https://bitbucket.org/yt_analysis/yt/commits/85ae690edcee/
Changeset:   85ae690edcee
Branch:      yt
User:        jzuhone
Date:        2016-05-01 14:43:51+00:00
Summary:     Log-spherical for Athena++
Affected #:  1 file

diff -r 10dc56aed6c4febed305f8254689b26b25efaccc -r 85ae690edcee092153b75ba6a3ac32e7aceb9c36 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -108,6 +108,10 @@
             self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
         levels = self._handle["Levels"][:]
 
+        if self.ds.geometry == "logspherical":
+            self.grid_left_edge[:,0] = np.log10(self.grid_left_edge[:,0])
+            self.grid_right_edge[:,0] = np.log10(self.grid_right_edge[:,0])
+
         self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
         self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
 
@@ -205,12 +209,20 @@
 
     def _parse_parameter_file(self):
 
-        xmin, xmax = self._handle.attrs["RootGridX1"][:2]
-        ymin, ymax = self._handle.attrs["RootGridX2"][:2]
-        zmin, zmax = self._handle.attrs["RootGridX3"][:2]
+        xmin, xmax, xrat = self._handle.attrs["RootGridX1"][:]
+        ymin, ymax, yrat = self._handle.attrs["RootGridX2"][:]
+        zmin, zmax, zrat = self._handle.attrs["RootGridX3"][:]
 
         self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
         self.domain_right_edge = np.array([xmax, ymax, zmax], dtype='float64')
+
+        self.geometry = geom_map[self._handle.attrs["Coordinates"].decode('utf-8')]
+
+        if xrat != 1.0 and self.geometry == "spherical":
+            self.geometry = "logspherical"
+            self.domain_left_edge[0] = np.log10(self.domain_left_edge[0])
+            self.domain_right_edge[0] = np.log10(self.domain_right_edge[0])
+
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self._handle.attrs["RootGridSize"]
 
@@ -253,7 +265,6 @@
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
         else:
             self.parameters["Gamma"] = 5./3.
-        self.geometry = geom_map[self._handle.attrs["Coordinates"].decode('utf-8')]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
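For reference, the detection added here amounts to reading the root-grid spacing ratio and coordinate system from the HDF5 attributes and switching the radial edges to log10 when the grid is ratioed. A rough standalone sketch with h5py ("disk.athdf" is a placeholder filename, and the geometry-name check is abbreviated relative to geom_map):

    import numpy as np
    import h5py

    with h5py.File("disk.athdf", "r") as f:
        xmin, xmax, xrat = f.attrs["RootGridX1"][:]
        coords = f.attrs["Coordinates"].decode("utf-8")

    # Treat a ratioed radial grid in spherical coordinates as log-spherical
    if xrat != 1.0 and coords.startswith("spherical"):
        r_left, r_right = np.log10(xmin), np.log10(xmax)
    else:
        r_left, r_right = xmin, xmax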


https://bitbucket.org/yt_analysis/yt/commits/90543da8c945/
Changeset:   90543da8c945
Branch:      yt
User:        jzuhone
Date:        2016-05-12 22:06:14+00:00
Summary:     Use a semi-structured mesh for this kind of data instead.
Affected #:  1 file

diff -r 85ae690edcee092153b75ba6a3ac32e7aceb9c36 -r 90543da8c9450a672fe61c7ce58780b70f881f58 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -32,6 +32,11 @@
     YTDataChunk
 from yt.utilities.file_handler import \
     HDF5FileHandler
+from yt.geometry.unstructured_mesh_handler import \
+    UnstructuredIndex
+from yt.data_objects.unstructured_mesh import \
+    SemiStructuredMesh
+from itertools import chain, product
 
 from .fields import AthenaPPFieldInfo
 
@@ -44,6 +49,53 @@
             "schwarzschild": "spherical",
             "kerr-schild": "spherical"}
 
+_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                   dtype=np.int64, count = 8*3)
+_cis.shape = (8, 3)
+
+class AthenaPPLogarithmicMesh(SemiStructuredMesh):
+    _index_offset = 0
+
+    def __init__(self, *args, **kwargs):
+        super(AthenaPPLogarithmicMesh, self).__init__(*args, **kwargs)
+
+class AthenaPPLogarithmicIndex(UnstructuredIndex):
+    def __init__(self, ds, dataset_type = 'athena++'):
+        super(AthenaPPLogarithmicIndex, self).__init__(ds, dataset_type)
+        self._handle = ds._handle
+        self.index_filename = self.dataset.filename
+        self.directory = os.path.dirname(self.dataset.filename)
+        self.dataset_type = dataset_type
+
+    def _initialize_mesh(self):
+        num_grids = self._handle.attrs["NumMeshBlocks"]
+        self.meshes = []
+        for i in range(num_grids):
+            x = self._handle["x1f"][i,:]
+            y = self._handle["x2f"][i,:]
+            z = self._handle["x3f"][i,:]
+            nx = len(x)
+            ny = len(y)
+            nz = len(z)
+            coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
+            coords[:,:,:,0] = x[:,None,None]
+            coords[:,:,:,1] = y[None,:,None]
+            coords[:,:,:,2] = z[None,None,:]
+            coords.shape = (nx * ny * nz, 3)
+            cycle = np.rollaxis(np.indices((nx-1,ny-1,nz-1)), 0, 4)
+            cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+            off = _cis + cycle[:, np.newaxis]
+            connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+            mesh = AthenaPPLogarithmicMesh(i,
+                                           self.index_filename,
+                                           connectivity,
+                                           coords,
+                                           self)
+            self.meshes.append(mesh)
+
+    def _detect_output_fields(self):
+        self.field_list = [("athena++", k) for k in self.ds._field_map]
+
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0
 
@@ -108,10 +160,6 @@
             self.grid_dimensions[i] = self._handle.attrs["MeshBlockSize"]
         levels = self._handle["Levels"][:]
 
-        if self.ds.geometry == "logspherical":
-            self.grid_left_edge[:,0] = np.log10(self.grid_left_edge[:,0])
-            self.grid_right_edge[:,0] = np.log10(self.grid_right_edge[:,0])
-
         self.grid_left_edge = self.ds.arr(self.grid_left_edge, "code_length")
         self.grid_right_edge = self.ds.arr(self.grid_right_edge, "code_length")
 
@@ -161,7 +209,6 @@
                               cache = cache)
 
 class AthenaPPDataset(Dataset):
-    _index_class = AthenaPPHierarchy
     _field_info_class = AthenaPPFieldInfo
     _dataset_type = "athena++"
 
@@ -175,6 +222,13 @@
         if units_override is None:
             units_override = {}
         self._handle = HDF5FileHandler(filename)
+        xrat = self._handle.attrs["RootGridX1"][2]
+        yrat = self._handle.attrs["RootGridX2"][2]
+        zrat = self._handle.attrs["RootGridX3"][2]
+        if xrat != 1.0 or yrat != 1.0 or zrat != 1.0:
+            self._index_class = AthenaPPLogarithmicMesh
+        else:
+            self._index_class = AthenaPPHierarchy
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.filename = filename
@@ -209,20 +263,14 @@
 
     def _parse_parameter_file(self):
 
-        xmin, xmax, xrat = self._handle.attrs["RootGridX1"][:]
-        ymin, ymax, yrat = self._handle.attrs["RootGridX2"][:]
-        zmin, zmax, zrat = self._handle.attrs["RootGridX3"][:]
+        xmin, xmax = self._handle.attrs["RootGridX1"][:2]
+        ymin, ymax = self._handle.attrs["RootGridX2"][:2]
+        zmin, zmax = self._handle.attrs["RootGridX3"][:2]
 
         self.domain_left_edge = np.array([xmin, ymin, zmin], dtype='float64')
         self.domain_right_edge = np.array([xmax, ymax, zmax], dtype='float64')
 
         self.geometry = geom_map[self._handle.attrs["Coordinates"].decode('utf-8')]
-
-        if xrat != 1.0 and self.geometry == "spherical":
-            self.geometry = "logspherical"
-            self.domain_left_edge[0] = np.log10(self.domain_left_edge[0])
-            self.domain_right_edge[0] = np.log10(self.domain_right_edge[0])
-
         self.domain_width = self.domain_right_edge-self.domain_left_edge
         self.domain_dimensions = self._handle.attrs["RootGridSize"]
 


https://bitbucket.org/yt_analysis/yt/commits/6063cd03f661/
Changeset:   6063cd03f661
Branch:      yt
User:        jzuhone
Date:        2016-05-12 23:01:58+00:00
Summary:     Bug fixes
Affected #:  1 file

diff -r 90543da8c9450a672fe61c7ce58780b70f881f58 -r 6063cd03f661e9d3b39d457ef8136680ab05c0e9 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -61,22 +61,21 @@
 
 class AthenaPPLogarithmicIndex(UnstructuredIndex):
     def __init__(self, ds, dataset_type = 'athena++'):
+        self._handle = ds._handle
         super(AthenaPPLogarithmicIndex, self).__init__(ds, dataset_type)
-        self._handle = ds._handle
         self.index_filename = self.dataset.filename
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
 
     def _initialize_mesh(self):
+        mylog.debug("Setting up grids.")
         num_grids = self._handle.attrs["NumMeshBlocks"]
         self.meshes = []
         for i in range(num_grids):
             x = self._handle["x1f"][i,:]
             y = self._handle["x2f"][i,:]
             z = self._handle["x3f"][i,:]
-            nx = len(x)
-            ny = len(y)
-            nz = len(z)
+            nx, ny, nz = self._handle.attrs["MeshBlockSize"]+1
             coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
             coords[:,:,:,0] = x[:,None,None]
             coords[:,:,:,1] = y[None,:,None]
@@ -92,6 +91,7 @@
                                            coords,
                                            self)
             self.meshes.append(mesh)
+        mylog.debug("Done setting up grids.")
 
     def _detect_output_fields(self):
         self.field_list = [("athena++", k) for k in self.ds._field_map]
@@ -226,7 +226,7 @@
         yrat = self._handle.attrs["RootGridX2"][2]
         zrat = self._handle.attrs["RootGridX3"][2]
         if xrat != 1.0 or yrat != 1.0 or zrat != 1.0:
-            self._index_class = AthenaPPLogarithmicMesh
+            self._index_class = AthenaPPLogarithmicIndex
         else:
             self._index_class = AthenaPPHierarchy
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
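A note on the "MeshBlockSize + 1" fix above: x1f/x2f/x3f hold cell-face coordinates, so a block of N cells along an axis has N+1 faces. A trivial illustration with a made-up block size:

    import numpy as np

    mesh_block_size = np.array([4, 4, 4])    # hypothetical cells per axis
    nx, ny, nz = mesh_block_size + 1         # face coordinates per axis
    assert (nx, ny, nz) == (5, 5, 5)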


https://bitbucket.org/yt_analysis/yt/commits/57bcd94ed3be/
Changeset:   57bcd94ed3be
Branch:      yt
User:        jzuhone
Date:        2016-05-12 23:03:34+00:00
Summary:     Don't want to choose field names this way
Affected #:  1 file

diff -r 6063cd03f661e9d3b39d457ef8136680ab05c0e9 -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -43,13 +43,9 @@
         unit_system = self.ds.unit_system
         # Add velocity fields
         vel_prefix = "velocity"
-        if self.ds.geometry != "cartesian":
-            vel_prefix += "_"+self.ds.geometry
         for i, comp in enumerate(self.ds.coordinates.axis_order):
             vel_field = ("athena++", "vel%d" % (i+1))
             mom_field = ("athena++", "mom%d" % (i+1))
-            if comp == "r":
-                comp = "radius"
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
                 self.alias(("gas","%s_%s" % (vel_prefix, comp)), vel_field,


https://bitbucket.org/yt_analysis/yt/commits/560bd19f1294/
Changeset:   560bd19f1294
Branch:      yt
User:        jzuhone
Date:        2016-05-13 01:30:01+00:00
Summary:     Try to speed things up. Make this compatible with the IO handler
Affected #:  1 file

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r 560bd19f129415afe315135ffcc6213b7c11f8c7 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -55,10 +55,15 @@
 
 class AthenaPPLogarithmicMesh(SemiStructuredMesh):
     _index_offset = 0
+    _id_offset = 0
 
     def __init__(self, *args, **kwargs):
         super(AthenaPPLogarithmicMesh, self).__init__(*args, **kwargs)
 
+    @property
+    def id(self):
+        return self.mesh_id
+
 class AthenaPPLogarithmicIndex(UnstructuredIndex):
     def __init__(self, ds, dataset_type = 'athena++'):
         self._handle = ds._handle
@@ -96,6 +101,11 @@
     def _detect_output_fields(self):
         self.field_list = [("athena++", k) for k in self.ds._field_map]
 
+    def _count_selection(self, dobj, meshes = None):
+        if meshes is None: meshes = dobj._chunk_info
+        count = np.sum([m.count(dobj.selector) for m in meshes])
+        return count
+
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0
 


https://bitbucket.org/yt_analysis/yt/commits/c98eaac166e7/
Changeset:   c98eaac166e7
Branch:      yt
User:        jzuhone
Date:        2016-05-14 14:10:59+00:00
Summary:     Merge
Affected #:  93 files

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -994,6 +994,17 @@
         cd ..
     fi
 
+    ( ${DEST_DIR}/bin/python -c "import _ssl" 2>&1 ) 1>> ${LOG_FILE}
+    RESULT=$?
+    if  [ $RESULT -ne 0 ]
+    then
+        echo "Unable to import the python SSL bindings."
+        echo "This means that OpenSSL is not installed or your system's OpenSSL"
+        echo "installation is out of date."
+        echo "Please install OpenSSL or set INST_CONDA=1"
+        do_exit
+    fi
+
     if [ $INST_PY3 -eq 1 ]
     then
         if [ ! -e $PYTHON3/done ]

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in units of solar metallicity.
+   Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -3,8 +3,8 @@
 from yt.analysis_modules.cosmological_observation.api import \
     LightRay
 
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-lr = LightRay(fn)
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+lr = LightRay(ds)
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
@@ -17,7 +17,6 @@
 
 # Optionally, we can now overplot this ray on a projection of the source
 # dataset
-ds = yt.load(fn)
 p = yt.ProjectionPlot(ds, 'z', 'density')
 p.annotate_ray(lr)
 p.save()

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

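To make the paragraph about unitful constants above concrete, here is a small illustrative sketch (not part of the diff): a derived field that uses the gravitational constant imported from yt.units, so the returned array carries units automatically. The field name and formula are just examples:

    import numpy as np
    import yt
    from yt.units import G

    def _dynamical_time(field, data):
        # G is a unitful quantity, so the result comes out with time units
        return np.sqrt(3.0 * np.pi / (16.0 * G * data["density"]))

    yt.add_field("dynamical_time", function=_dynamical_time, units="s")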
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -34,7 +34,8 @@
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
 To get started, make a new directory in ``yt/frontends`` with the name
-of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+of your code and add the name into ``yt/frontends/api.py``.
+Copying the contents of the ``yt/frontends/_skeleton``
 directory will add a lot of boilerplate for the required classes and
 methods that are needed.  In particular, you'll have to create a
 subclass of ``Dataset`` in the data_structures.py file. This subclass

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 
@@ -532,7 +538,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for the OWLS frontend.
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``),
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1021,6 +1021,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any units for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data

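As an illustrative follow-up to the GAMER documentation above (a sketch, not part of the diff): once units_override has been supplied, the aliased yt field and the native GAMER field can be inspected side by side to confirm which unit system each lives in.

    import yt

    code_units = {"length_unit": (1.0, "kpc"),
                  "time_unit": (3.08567758096e+13, "s"),
                  "mass_unit": (1.4690033e+36, "g")}
    ds = yt.load("InteractingJets/jet_000002", units_override=code_units)

    ad = ds.all_data()
    print(ad["gas", "density"].units)    # cgs (g/cm**3)
    print(ad["gamer", "Dens"].units)     # code units (code_mass/code_length**3)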
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essentials`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using `conda`.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OSX, or on a cluster that has a
+very out of date installation of OpenSSL. In both of these cases you will either
+need to install OpenSSL yourself from the system package manager or consider
+using ``INST_CONDA=1``, since conda-based installs can install the conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc)
+to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script to activate
+the yt environment in a terminal session. You must run this script in order to
+have yt properly recognized by your system.  You can either add it to your login
+script, or you must execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install latest development version of yt, you can download
 it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for development branch are built after every pull request is
 merged. In order to make sure you are running latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 Location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
-
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+.. _conda-source-build:
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy ``librockstar.so`` into the ``lib`` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+``librockstar.so`` is in your ``LD_LIBRARY_PATH``:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
-
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
 - ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
-
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+  $ pip install numpy matplotlib cython sympy
 
-.. code-block:: bash
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
-
-.. note::
-
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
-  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
-  if you are curious why ``--prefix=`` is neccessary on some systems.
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `This
+  StackOverflow discussion
+  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
+  if you are curious why ``--prefix=`` is necessary on some systems.
 
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX) Please refer to
 the ``setuptools`` documentation for the additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your PATH. Some Linux distributions do not include this directory in the
+default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting the
+output:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see :ref:`nightly-conda-builds`), or you can follow the instructions above to build yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`).
 

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,6 +34,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -55,7 +55,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
@@ -113,24 +113,23 @@
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("redshift0058")
-   dd = ds.sphere("max", (200, "kpc"))
-   rho = 5e-27
-
-   bounds = [(dd.center[i] - 100.0/ds['kpc'],
-              dd.center[i] + 100.0/ds['kpc']) for i in range(3)]
+    import yt
+    from yt.units import kpc
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    dd = ds.sphere(ds.domain_center, (500, "kpc"))
+    rho = 1e-28
 
-   surf = ds.surface(dd, "density", rho)
+    bounds = [[dd.center[i] - 250*kpc, dd.center[i] + 250*kpc] for i in range(3)]
 
-   upload_id = surf.export_sketchfab(
-       title = "RD0058 - 5e-27",
-       description = "Extraction of Density (colored by Temperature) at 5e-27 " \
-                   + "g/cc from a galaxy formation simulation by Ryan Joung."
-       color_field = "temperature",
-       color_map = "hot",
-       color_log = True,
-       bounds = bounds
+    surf = ds.surface(dd, "density", rho)
+
+    upload_id = surf.export_sketchfab(
+        title="galaxy0030 - 1e-28",
+        description="Extraction of Density (colored by temperature) at 1e-28 g/cc",
+        color_field="temperature",
+        color_map="hot",
+        color_log=True,
+        bounds=bounds
    )
 
 and yt will extract a surface, convert to a format that Sketchfab.com
@@ -141,15 +140,13 @@
 
 .. raw:: html
 
-   <iframe frameborder="0" height="480" width="854" allowFullScreen
-   webkitallowfullscreen="true" mozallowfullscreen="true"
-   src="http://skfb.ly/l4jh2edcba?autostart=0&transparent=0&autospin=0&controls=1&watermark=1"></iframe>
+     <iframe width="640" height="480" src="https://sketchfab.com/models/ff59dacd55824110ad5bcc292371a514/embed" frameborder="0" allowfullscreen mozallowfullscreen="true" webkitallowfullscreen="true" onmousewheel=""></iframe>
 
 As a note, Sketchfab has a maximum model size of 50MB for the free account.
-50MB is pretty hefty, though, so it shouldn't be a problem for most needs.
-We're working on a way to optionally upload links to the Sketchfab models on
-the `yt Hub <https://hub.yt-project.org/>`_, but for now, if you want to share
-a cool model we'd love to see it!
+50MB is pretty hefty, though, so it shouldn't be a problem for most
+needs. Additionally, if you have an eligible e-mail address associated with a
+school or university, you can request a free professional account, which allows
+models up to 200MB. See https://sketchfab.com/education for details.
 
 OBJ and MTL Files
 -----------------
@@ -167,7 +164,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'
@@ -239,7 +236,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'

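The OBJ/MTL hunks above cut off right after the ``filename`` line. As a rough sketch of how that snippet typically continues (the ``export_obj`` keywords shown here are the commonly documented ones and are not taken from this diff), one iterates over the density levels and exports each surface:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]      # density levels to extract
    trans = [1.0, 0.5]        # transparency of each level
    filename = './surfaces'

    sphere = ds.sphere("max", (1.0, "Mpc"))
    for i, r in enumerate(rho):
        surf = ds.surface(sphere, "density", r)
        # plot_index keeps the two surfaces in separate groups of the OBJ file
        surf.export_obj(filename, transparency=trans[i],
                        color_field="temperature", plot_index=i)
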
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,14 +157,21 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.primitives",
+              ["yt/utilities/lib/primitives.pyx"],
+              libraries=std_libs, 
+              depends=["yt/utilities/lib/primitives.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/element_mappings.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/primitives.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -275,20 +282,30 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd"]),
+                  depends=["yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/mesh_intersection.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/mesh_traversal.pxd"]),
         Extension("yt.utilities.lib.mesh_traversal",
                   ["yt/utilities/lib/mesh_traversal.pyx"],
                   depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd"]),
+                           "yt/utilities/lib/grid_traversal.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
         Extension("yt.utilities.lib.mesh_samplers",
                   ["yt/utilities/lib/mesh_samplers.pyx"],
                   depends=["yt/utilities/lib/mesh_samplers.pxd",
                            "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/primitives.pxd"]),
         Extension("yt.utilities.lib.mesh_intersection",
                   ["yt/utilities/lib/mesh_intersection.pyx"],
                   depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/primitives.pxd",
+                           "yt/utilities/lib/vec3_ops.pxd"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -1,10 +1,11 @@
 import os
 from pkg_resources import resource_filename
 import shutil
-import subprocess
+from subprocess import Popen, PIPE
 import sys
 import tempfile
 
+
 def check_for_openmp():
     """Returns True if local setup supports OpenMP, False otherwise"""
 
@@ -37,13 +38,21 @@
             "}"
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-fopenmp', filename],
-                                        stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-fopenmp', filename],
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+        
+        if exit_code != 0:
+            print("Compilation of OpenMP test code failed with the error: ")
+            print(err)
+            print("Disabling OpenMP support. ")
 
         # Clean up
         file.close()
     except OSError:
+        print("check_for_openmp() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
         return False
     finally:
         os.chdir(curdir)
@@ -82,12 +91,11 @@
         except IOError:
             rd = '/usr/local'
 
-    fail_msg = ("Pyembree is installed, but I could not compile Embree test code. \n"
-               "I attempted to find Embree headers in %s. \n"
+    fail_msg = ("I attempted to find Embree headers in %s. \n"
                "If this is not correct, please set your correct embree location \n"
                "using EMBREE_DIR environment variable or your embree.cfg file. \n"
                "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html "
-                "for more information." % rd)
+                "for more information. \n" % rd)
 
     # Create a temporary directory
     tmpdir = tempfile.mkdtemp()
@@ -110,23 +118,29 @@
             '}'
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
-                             stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-I%s/include/' % rd, filename], 
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+
+        if exit_code != 0:
+            print("Pyembree is installed, but I could not compile Embree test code.")
+            print("The error message was: ")
+            print(err)
+            print(fail_msg)
 
         # Clean up
         file.close()
 
     except OSError:
-        print(fail_msg)
+        print("read_embree_location() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
+        return False
 
     finally:
         os.chdir(curdir)
         shutil.rmtree(tmpdir)
 
-    if exit_code != 0:
-        print(fail_msg)
-
     return rd
 
 

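The setupext.py changes above replace ``subprocess.call`` piped to ``/dev/null`` with ``Popen`` plus ``PIPE`` so the compiler's stderr can be echoed when a test compilation fails. A minimal standalone sketch of that pattern (``try_compile`` and its arguments are hypothetical helpers, not part of yt):

    import os
    import shutil
    import tempfile
    from subprocess import Popen, PIPE

    def try_compile(compiler, source, flags):
        # Compile a small test program in a temp dir, echoing stderr on failure.
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with open("check.c", "w") as f:
                f.write(source)
            p = Popen([compiler] + flags + ["check.c"],
                      stdin=PIPE, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            if p.returncode != 0:
                print("Compilation failed with:")
                print(err.decode())
            return p.returncode == 0
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)

    # e.g. try_compile("cc", "int main(void){ return 0; }\n", ["-fopenmp"])
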
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,6 +20,9 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gamer_000:
+    - yt/frontends/gamer/tests/test_outputs.py
+
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
@@ -44,7 +47,7 @@
   local_tipsy_000:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_000:
+  local_varia_001:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -394,7 +394,8 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / 
+                               resolution) ).clip(0, np.inf) ) ).astype('int')
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
             # a note to the user about which lines components are unresolved

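The absorption_spectrum change above casts the number of virtual bins to an integer before it is used as a bin count. A tiny numpy illustration of what that expression computes (the input values here are made up):

    import numpy as np

    subgrid_resolution = 10
    resolution = np.array([0.3, 2.5, 40.0])   # thermal_width / bin_width, made-up values

    # Round the ratio up to the next power of ten, never go below 10**0 = 1,
    # and cast to int so the result can be used as a number of virtual bins.
    n_vbins_per_bin = (10 ** np.ceil(np.log10(subgrid_resolution / resolution)).clip(0, np.inf)).astype('int')
    print(n_vbins_per_bin)   # [100  10   1]
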
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -31,24 +31,19 @@
     parallel_objects, \
     parallel_root_only
 from yt.utilities.physical_constants import speed_of_light_cgs
+from yt.data_objects.static_output import Dataset
 
 class LightRay(CosmologySplice):
     """
-    LightRay(parameter_filename, simulation_type=None,
-             near_redshift=None, far_redshift=None,
-             use_minimum_datasets=True, deltaz_min=0.0,
-             minimum_coherent_box_fraction=0.0,
-             time_data=True, redshift_data=True,
-             find_outputs=False, load_kwargs=None):
-
-    Create a LightRay object.  A light ray is much like a light cone,
-    in that it stacks together multiple datasets in order to extend a
-    redshift interval.  Unlike a light cone, which does randomly
-    oriented projections for each dataset, a light ray consists of
-    randomly oriented single rays.  The purpose of these is to create
-    synthetic QSO lines of sight.
-
-    Light rays can also be made from single datasets.
+    A LightRay object is a one-dimensional object representing the trajectory 
+    of a ray of light as it passes through one or more datasets (simple and
+    compound rays respectively).  One can sample any of the fields intersected
+by the LightRay object as it passes through the dataset(s).
+    
+    For compound rays, the LightRay stacks together multiple datasets in a time
+    series in order to approximate a LightRay's path through a volume
+    and redshift interval larger than a single simulation data output.
+    The outcome is something akin to a synthetic QSO line of sight.
 
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
@@ -56,31 +51,32 @@
 
     Parameters
     ----------
-    parameter_filename : string
-        The path to the simulation parameter file or dataset.
+    parameter_filename : string or :class:`yt.data_objects.static_output.Dataset`
+        For simple rays, one may pass either a loaded dataset object or
+        the filename of a dataset.
+        For compound rays, one must pass the filename of the simulation
+        parameter file.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to
-        refer to a single dataset.
+        This refers to the simulation frontend type.  Do not use for simple 
+        rays.
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     use_minimum_datasets : optional, bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
         will contain as many entries as possible within the redshift
-        interval.
+        interval.  Do not use for simple rays.
         Default: True.
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
-        datasets in the returned list.
+        datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
         Used with use_minimum_datasets set to False, this parameter
@@ -88,23 +84,26 @@
         before rerandomizing the projection axis and center.  This
         was invented to allow light rays with thin slices to sample
         coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.
+        so well.  Try setting this parameter to 1 and see what happens.  
+        Do not use for simple rays.
         Default: 0.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     redshift_data : optional, bool
         Whether or not to include redshift outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     find_outputs : optional, bool
         Whether or not to search for datasets in the current
-        directory.
+        directory.  Do not use for simple rays.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load"
-        function, appropriate for use of certain frontends.  E.g.
+        If you are passing a dataset filename to LightRay rather than an
+        already loaded dataset, you can optionally provide this dictionary of
+        keyword arguments to be passed to yt's "load" function when the dataset is loaded.
+        Necessary for use with certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
         Default : None
@@ -130,26 +129,37 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.
-        if simulation_type is None:
+        # The options here are:
+        # 1) User passed us a dataset: use it to make a simple ray
+        # 2) User passed us a dataset filename: use it to make a simple ray
+        # 3) User passed us a simulation filename: use it to make a compound ray
+
+        # Make a light ray from a single, given dataset: #1, #2
+        if simulation_type is None:     
             self.simulation_type = simulation_type
-            ds = load(parameter_filename, **self.load_kwargs)
-            if ds.cosmological_simulation:
-                redshift = ds.current_redshift
+            if isinstance(self.parameter_filename, Dataset):
+                self.ds = self.parameter_filename
+                self.parameter_filename = self.ds.basename
+            elif isinstance(self.parameter_filename, str):
+                self.ds = load(self.parameter_filename, **self.load_kwargs)
+            if self.ds.cosmological_simulation:
+                redshift = self.ds.current_redshift
                 self.cosmology = Cosmology(
-                    hubble_constant=ds.hubble_constant,
-                    omega_matter=ds.omega_matter,
-                    omega_lambda=ds.omega_lambda,
-                    unit_registry=ds.unit_registry)
+                    hubble_constant=self.ds.hubble_constant,
+                    omega_matter=self.ds.omega_matter,
+                    omega_lambda=self.ds.omega_lambda,
+                    unit_registry=self.ds.unit_registry)
             else:
                 redshift = 0.
-            self.light_ray_solution.append({"filename": parameter_filename,
+            self.light_ray_solution.append({"filename": self.parameter_filename,
                                             "redshift": redshift})
 
-        # Make a light ray from a simulation time-series.
+        # Make a light ray from a simulation time-series. #3
         else:
+            self.ds = None
+            assert isinstance(self.parameter_filename, str)
             # Get list of datasets for light ray solution.
-            CosmologySplice.__init__(self, parameter_filename, simulation_type,
+            CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
               self.create_cosmology_splice(self.near_redshift, self.far_redshift,
@@ -270,7 +280,7 @@
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
         the ray and the path length of the ray through those cells.
-        Light ray data can be written out to an hdf5 file.
+        Light ray data must be written out to an hdf5 file.
 
         Parameters
         ----------
@@ -383,8 +393,12 @@
                                                        storage=all_ray_storage,
                                                        njobs=njobs):
 
-            # Load dataset for segment.
-            ds = load(my_segment['filename'], **self.load_kwargs)
+            # In case of simple rays, use the already loaded dataset: self.ds, 
+            # otherwise, load dataset for segment.
+            if self.ds is None:
+                ds = load(my_segment['filename'], **self.load_kwargs)
+            else:
+                ds = self.ds
 
             my_segment['unique_identifier'] = ds.unique_identifier
             if redshift is not None:
@@ -555,7 +569,7 @@
         Write light ray data to hdf5 file.
         """
         if self.simulation_type is None:
-            ds = load(self.parameter_filename, **self.load_kwargs)
+            ds = self.ds
         else:
             ds = {}
             ds["dimensionality"] = self.simulation.dimensionality

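With the rewrite above, LightRay accepts either an already-loaded dataset (a simple ray) or a simulation parameter file plus ``simulation_type`` (a compound ray). A short sketch of both call patterns, using the same Enzo test data that the new unit tests below rely on:

    import yt
    from yt.analysis_modules.cosmological_observation.api import LightRay

    # Simple ray: pass a loaded dataset and explicit start/end points.
    ds = yt.load("enzo_cosmology_plus/RD0009/RD0009")
    lr = LightRay(ds)
    lr.make_light_ray(start_position=[0., 0., 0.],
                      end_position=[1., 1., 1.],
                      fields=['temperature', 'density'],
                      data_filename='simple_ray.h5')

    # Compound ray: pass the simulation parameter file, the frontend type,
    # and the near/far redshifts to splice together.
    lr = LightRay("enzo_cosmology_plus/AMRCosmology.enzo", 'Enzo', 0.0, 0.03)
    lr.make_light_ray(seed=1234567,
                      fields=['temperature', 'density'],
                      data_filename='compound_ray.h5')
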
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- /dev/null
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -0,0 +1,92 @@
+"""
+Unit test for the light_ray analysis module
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    requires_file
+from yt.analysis_modules.cosmological_observation.api import LightRay
+import os
+import shutil
+from yt.utilities.answer_testing.framework import data_dir_load
+import tempfile
+
+COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+
+ at requires_file(COSMO_PLUS)
+def test_light_ray_cosmo():
+    """
+    This test generates a cosmological light ray
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+ at requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo():
+    """
+    This test generates a non-cosmological light ray
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo_from_dataset():
+    """
+    This test generates a non-cosmological light ray created from an already
+    loaded dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(COSMO_PLUS_SINGLE)
+    lr = LightRay(ds)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -176,7 +176,7 @@
     constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
-        to use this.
+        to use this. It should be given in units of solar metallicity.
         Default: None.
 
     This will create three fields:
@@ -245,7 +245,7 @@
 
     emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
@@ -253,7 +253,7 @@
 
     lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/s")
 
     def _photon_emissivity_field(field, data):
@@ -273,7 +273,7 @@
 
     phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="photons/cm**3/s")
 
     return emiss_name, lum_name, phot_name

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -65,6 +65,7 @@
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
     default_colormap = 'arbre',
+    ray_tracing_engine = 'embree',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

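The config.py change above registers ``ray_tracing_engine`` with a default of ``'embree'``. As a sketch, the value can be read through ``ytcfg`` and overridden in your yt configuration file (``~/.config/yt/ytrc`` or ``~/.yt/config``, depending on the yt version you are running):

    from yt.config import ytcfg

    # Override in the config file rather than in yt/config.py, e.g.:
    #
    #   [yt]
    #   ray_tracing_engine = yt
    #
    print(ytcfg.get("yt", "ray_tracing_engine"))
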
diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -600,7 +600,8 @@
             ftype = self._last_freq[0] or ftype
         field = (ftype, fname)
         if field == self._last_freq:
-            return self._last_finfo
+            if field not in self.field_info.field_aliases.values():
+                return self._last_finfo
         if field in self.field_info:
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]

diff -r 560bd19f129415afe315135ffcc6213b7c11f8c7 -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -225,3 +225,12 @@
             else:
                 self._last_count = mask.sum()
         return mask
+
+    def select(self, selector, source, dest, offset):
+        mask = self._get_selector_mask(selector)
+        count = self.count(selector)
+        if count == 0: return 0
+        # Note: this likely will not work with vector fields.
+        dest[offset:offset+count] = source.flat[mask]
+        return count
+

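The ``select`` method added to unstructured_mesh.py copies the masked source values into a flat, pre-allocated destination buffer starting at ``offset``. A small numpy illustration of the same idiom (arrays and sizes are made up):

    import numpy as np

    source = np.arange(12.0).reshape(3, 4)   # per-element field values
    mask = source > 5.0                      # boolean selector mask
    dest = np.empty(32)                      # pre-allocated output buffer
    offset = 4                               # where this chunk starts in dest

    count = mask.sum()
    # select() uses source.flat[mask] so the copy works for any layout;
    # for a same-shaped boolean mask this is equivalent to source[mask].
    dest[offset:offset + count] = source[mask]
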
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/c85bf4e6efc1/
Changeset:   c85bf4e6efc1
Branch:      yt
User:        jzuhone
Date:        2016-05-16 23:21:23+00:00
Summary:     Merging
Affected #:  93 files

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -994,6 +994,17 @@
         cd ..
     fi
 
+    ( ${DEST_DIR}/bin/python -c "import _ssl" 2>&1 ) 1>> ${LOG_FILE}
+    RESULT=$?
+    if  [ $RESULT -ne 0 ]
+    then
+        echo "Unable to import the python SSL bindings."
+        echo "This means that OpenSSL is not installed or your system's OpenSSL"
+        echo "installation is out of date."
+        echo "Please install OpenSSL or set INST_CONDA=1"
+        do_exit
+    fi
+
     if [ $INST_PY3 -eq 1 ]
     then
         if [ ! -e $PYTHON3/done ]

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

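Several of the documentation and notebook hunks in this merge swap imports of physical constants over to ``yt.units`` (quantities that carry units), while bare conversion factors stay in ``yt.utilities.physical_ratios``. A quick sketch of the two namespaces:

    from yt.units import mp, kboltz, G, clight                        # constants with units attached
    from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV   # plain floats

    print(mp)           # proton mass as a YTQuantity (~1.67e-24 g)
    print(cm_per_kpc)   # bare conversion factor, a float
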
diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in units of solar metallicity.
+   Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

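Note that in the updated examples the dataset is now the first argument to ``add_xray_emissivity_field``, so it has to be loaded before the call. A hedged sketch combining that with the ``constant_metallicity`` keyword documented above (the 0.3 solar value is just an example):

    import yt
    from yt.analysis_modules.spectral_integrator.api import \
         add_xray_emissivity_field

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")

    # constant_metallicity requires with_metals=False, per the docs above.
    xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0,
                                            with_metals=False,
                                            constant_metallicity=0.3)

    plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')
    plot.save()
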
diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

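The notebook changes above all lean on the ``to_equivalent`` machinery with constants imported from ``yt.units``; collected in one runnable snippet:

    from yt.units import mp, clight, qp

    print(mp.to_equivalent("GeV", "mass_energy"))                     # proton rest energy
    print((0.1 * clight).to_equivalent("dimensionless", "lorentz"))   # Lorentz factor at 0.1c
    print(qp.to_equivalent("C", "SI"))                                # elementary charge in Coulombs
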
diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -3,8 +3,8 @@
 from yt.analysis_modules.cosmological_observation.api import \
     LightRay
 
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-lr = LightRay(fn)
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+lr = LightRay(ds)
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
@@ -17,7 +17,6 @@
 
 # Optionally, we can now overplot this ray on a projection of the source
 # dataset
-ds = yt.load(fn)
 p = yt.ProjectionPlot(ds, 'z', 'density')
 p.annotate_ray(lr)
 p.save()

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

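Following the updated advice in creating_derived_fields.rst, constants come from ``yt.units`` and the unit symbols carried by those constants set the field's units. A minimal sketch of a derived field built that way (the field name and the single-species pressure formula are illustrative only):

    import yt
    from yt.units import kboltz, mp

    def _thermal_pressure(field, data):
        # n * k_B * T, assuming a pure-hydrogen gas (mean molecular weight of 1);
        # the units come from kboltz and mp, not from a bare float.
        return data["gas", "density"] / mp * kboltz * data["gas", "temperature"]

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ds.add_field(("gas", "my_thermal_pressure"), function=_thermal_pressure,
                 units="dyne/cm**2")
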
diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -34,7 +34,8 @@
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
 To get started, make a new directory in ``yt/frontends`` with the name
-of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+of your code and add the name into ``yt/frontends/api.py``.
+Copying the contents of the ``yt/frontends/_skeleton``
 directory will add a lot of boilerplate for the required classes and
 methods that are needed.  In particular, you'll have to create a
 subclass of ``Dataset`` in the data_structures.py file. This subclass

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 
@@ -532,7 +538,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for the OWLS frontend.
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``),
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1021,6 +1021,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any units for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essential`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using `conda`.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OSX, or on a cluster that has a
+very out of date installation of OpenSSL. In both of these cases you will either
+need to install OpenSSL yourself from the system package manager or consider
+using ``INST_CONDA=1``, since conda-based installs can install the conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc)
+to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script to activate
+the yt environment in a terminal session. You must run this script in order to
+have yt properly recognized by your system.  You can either add it to your login
+script, or you must execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install latest development version of yt, you can download
 it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for development branch are built after every pull request is
 merged. In order to make sure you are running latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 Location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
-
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+.. _conda-source-build:
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy ``librockstar.so`` into the ``lib`` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+``librockstar.so`` is in your ``LD_LIBRARY_PATH``:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
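
If the import succeeds, the halo finder itself can be driven from a yt script. A
minimal sketch follows (the dataset path and reader/writer counts are
illustrative, and the script must be launched under MPI with enough processes
for the rockstar server plus the readers and writers):

.. code-block:: python

   import yt
   yt.enable_parallelism()
   from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

   # illustrative dataset; substitute your own simulation output
   ds = yt.load("Enzo_64/DD0043/data0043")
   rh = RockstarHaloFinder(ds, num_readers=1, num_writers=1)
   rh.run()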
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
-
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
 - ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
-
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+  $ pip install numpy matplotlib cython sympy
 
-.. code-block:: bash
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
-
-.. note::
-
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
-  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
-  if you are curious why ``--prefix=`` is neccessary on some systems.
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `this
+  StackOverflow discussion
+  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
+  if you are curious why ``--prefix=`` is necessary on some systems.
 
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX). Please refer to
 the ``setuptools`` documentation for additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your ``PATH``. Some Linux distributions do not include this directory
+in the default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
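
Alternatively, you can verify the installation from within python itself; a
minimal sketch:

.. code-block:: python

   import yt
   print(yt.__version__)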
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting the
+output:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see
+:ref:`nightly-conda-builds`), or you can follow the instructions above to build
+yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,6 +34,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -55,7 +55,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
@@ -113,24 +113,23 @@
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("redshift0058")
-   dd = ds.sphere("max", (200, "kpc"))
-   rho = 5e-27
-
-   bounds = [(dd.center[i] - 100.0/ds['kpc'],
-              dd.center[i] + 100.0/ds['kpc']) for i in range(3)]
+    import yt
+    from yt.units import kpc
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    dd = ds.sphere(ds.domain_center, (500, "kpc"))
+    rho = 1e-28
 
-   surf = ds.surface(dd, "density", rho)
+    bounds = [[dd.center[i] - 250*kpc, dd.center[i] + 250*kpc] for i in range(3)]
 
-   upload_id = surf.export_sketchfab(
-       title = "RD0058 - 5e-27",
-       description = "Extraction of Density (colored by Temperature) at 5e-27 " \
-                   + "g/cc from a galaxy formation simulation by Ryan Joung."
-       color_field = "temperature",
-       color_map = "hot",
-       color_log = True,
-       bounds = bounds
+    surf = ds.surface(dd, "density", rho)
+
+    upload_id = surf.export_sketchfab(
+        title="galaxy0030 - 1e-28",
+        description="Extraction of Density (colored by temperature) at 1e-28 g/cc",
+        color_field="temperature",
+        color_map="hot",
+        color_log=True,
+        bounds=bounds
    )
 
 and yt will extract a surface, convert to a format that Sketchfab.com
@@ -141,15 +140,13 @@
 
 .. raw:: html
 
-   <iframe frameborder="0" height="480" width="854" allowFullScreen
-   webkitallowfullscreen="true" mozallowfullscreen="true"
-   src="http://skfb.ly/l4jh2edcba?autostart=0&transparent=0&autospin=0&controls=1&watermark=1"></iframe>
+     <iframe width="640" height="480" src="https://sketchfab.com/models/ff59dacd55824110ad5bcc292371a514/embed" frameborder="0" allowfullscreen mozallowfullscreen="true" webkitallowfullscreen="true" onmousewheel=""></iframe>
 
 As a note, Sketchfab has a maximum model size of 50MB for the free account.
-50MB is pretty hefty, though, so it shouldn't be a problem for most needs.
-We're working on a way to optionally upload links to the Sketchfab models on
-the `yt Hub <https://hub.yt-project.org/>`_, but for now, if you want to share
-a cool model we'd love to see it!
+50MB is pretty hefty, though, so it shouldn't be a problem for most
+needs. Additionally, if you have an eligible e-mail address associated with a
+school or university, you can request a free professional account, which allows
+models up to 200MB. See https://sketchfab.com/education for details.
 
 OBJ and MTL Files
 -----------------
@@ -167,7 +164,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'
@@ -239,7 +236,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,14 +157,21 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.primitives",
+              ["yt/utilities/lib/primitives.pyx"],
+              libraries=std_libs, 
+              depends=["yt/utilities/lib/primitives.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/element_mappings.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/primitives.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -275,20 +282,30 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd"]),
+                  depends=["yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/mesh_intersection.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/mesh_traversal.pxd"]),
         Extension("yt.utilities.lib.mesh_traversal",
                   ["yt/utilities/lib/mesh_traversal.pyx"],
                   depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd"]),
+                           "yt/utilities/lib/grid_traversal.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
         Extension("yt.utilities.lib.mesh_samplers",
                   ["yt/utilities/lib/mesh_samplers.pyx"],
                   depends=["yt/utilities/lib/mesh_samplers.pxd",
                            "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/primitives.pxd"]),
         Extension("yt.utilities.lib.mesh_intersection",
                   ["yt/utilities/lib/mesh_intersection.pyx"],
                   depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/primitives.pxd",
+                           "yt/utilities/lib/vec3_ops.pxd"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -1,10 +1,11 @@
 import os
 from pkg_resources import resource_filename
 import shutil
-import subprocess
+from subprocess import Popen, PIPE
 import sys
 import tempfile
 
+
 def check_for_openmp():
     """Returns True if local setup supports OpenMP, False otherwise"""
 
@@ -37,13 +38,21 @@
             "}"
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-fopenmp', filename],
-                                        stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-fopenmp', filename],
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+        
+        if exit_code != 0:
+            print("Compilation of OpenMP test code failed with the error: ")
+            print(err)
+            print("Disabling OpenMP support. ")
 
         # Clean up
         file.close()
     except OSError:
+        print("check_for_openmp() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
         return False
     finally:
         os.chdir(curdir)
@@ -82,12 +91,11 @@
         except IOError:
             rd = '/usr/local'
 
-    fail_msg = ("Pyembree is installed, but I could not compile Embree test code. \n"
-               "I attempted to find Embree headers in %s. \n"
+    fail_msg = ("I attempted to find Embree headers in %s. \n"
                "If this is not correct, please set your correct embree location \n"
                "using EMBREE_DIR environment variable or your embree.cfg file. \n"
                "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html "
-                "for more information." % rd)
+                "for more information. \n" % rd)
 
     # Create a temporary directory
     tmpdir = tempfile.mkdtemp()
@@ -110,23 +118,29 @@
             '}'
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
-                             stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-I%s/include/' % rd, filename], 
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+
+        if exit_code != 0:
+            print("Pyembree is installed, but I could not compile Embree test code.")
+            print("The error message was: ")
+            print(err)
+            print(fail_msg)
 
         # Clean up
         file.close()
 
     except OSError:
-        print(fail_msg)
+        print("read_embree_location() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
+        return False
 
     finally:
         os.chdir(curdir)
         shutil.rmtree(tmpdir)
 
-    if exit_code != 0:
-        print(fail_msg)
-
     return rd
 
 

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,6 +20,9 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gamer_000:
+    - yt/frontends/gamer/tests/test_outputs.py
+
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
@@ -44,7 +47,7 @@
   local_tipsy_000:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_000:
+  local_varia_001:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -394,7 +394,8 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / 
+                               resolution) ).clip(0, np.inf) ) ).astype('int')
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
             # a note to the user about which lines components are unresolved
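
As a quick illustration of the corrected expression (the input values below are
made up), the result is now an integer number of virtual bins per bin, clipped
to a minimum of one:

.. code-block:: python

   import numpy as np

   # illustrative values for the desired sub-grid resolution and the
   # resolution actually achieved in each bin
   subgrid_resolution = 10
   resolution = np.array([0.5, 3.0, 40.0])

   n_vbins_per_bin = (10 ** (np.ceil(np.log10(subgrid_resolution /
                      resolution)).clip(0, np.inf))).astype('int')
   print(n_vbins_per_bin)  # [100  10   1]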

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -31,24 +31,19 @@
     parallel_objects, \
     parallel_root_only
 from yt.utilities.physical_constants import speed_of_light_cgs
+from yt.data_objects.static_output import Dataset
 
 class LightRay(CosmologySplice):
     """
-    LightRay(parameter_filename, simulation_type=None,
-             near_redshift=None, far_redshift=None,
-             use_minimum_datasets=True, deltaz_min=0.0,
-             minimum_coherent_box_fraction=0.0,
-             time_data=True, redshift_data=True,
-             find_outputs=False, load_kwargs=None):
-
-    Create a LightRay object.  A light ray is much like a light cone,
-    in that it stacks together multiple datasets in order to extend a
-    redshift interval.  Unlike a light cone, which does randomly
-    oriented projections for each dataset, a light ray consists of
-    randomly oriented single rays.  The purpose of these is to create
-    synthetic QSO lines of sight.
-
-    Light rays can also be made from single datasets.
+    A LightRay object is a one-dimensional object representing the trajectory 
+    of a ray of light as it passes through one or more datasets (simple and
+    compound rays, respectively).  One can sample any of the fields intersected
+    by the LightRay object as it passes through the dataset(s).
+    
+    For compound rays, the LightRay stacks together multiple datasets in a time
+    series in order to approximate a LightRay's path through a volume
+    and redshift interval larger than a single simulation data output.
+    The outcome is something akin to a synthetic QSO line of sight.
 
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
@@ -56,31 +51,32 @@
 
     Parameters
     ----------
-    parameter_filename : string
-        The path to the simulation parameter file or dataset.
+    parameter_filename : string or :class:`yt.data_objects.static_output.Dataset`
+        For simple rays, one may pass either a loaded dataset object or
+        the filename of a dataset.
+        For compound rays, one must pass the filename of the simulation
+        parameter file.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to
-        refer to a single dataset.
+        This refers to the simulation frontend type.  Do not use for simple 
+        rays.
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     use_minimum_datasets : optional, bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
         will contain as many entries as possible within the redshift
-        interval.
+        interval.  Do not use for simple rays.
         Default: True.
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
-        datasets in the returned list.
+        datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
         Used with use_minimum_datasets set to False, this parameter
@@ -88,23 +84,26 @@
         before rerandomizing the projection axis and center.  This
         was invented to allow light rays with thin slices to sample
         coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.
+        so well.  Try setting this parameter to 1 and see what happens.  
+        Do not use for simple rays.
         Default: 0.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     redshift_data : optional, bool
         Whether or not to include redshift outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     find_outputs : optional, bool
         Whether or not to search for datasets in the current
-        directory.
+        directory.  Do not use for simple rays.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load"
-        function, appropriate for use of certain frontends.  E.g.
+        If you are passing the filename of a dataset to LightRay rather than an
+        already loaded dataset, you can optionally provide this dictionary of
+        keyword arguments to be passed when the dataset is loaded by yt with
+        the "load" function.  Necessary for use with certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
         Default : None
@@ -130,26 +129,37 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.
-        if simulation_type is None:
+        # The options here are:
+        # 1) User passed us a dataset: use it to make a simple ray
+        # 2) User passed us a dataset filename: use it to make a simple ray
+        # 3) User passed us a simulation filename: use it to make a compound ray
+
+        # Make a light ray from a single, given dataset: #1, #2
+        if simulation_type is None:     
             self.simulation_type = simulation_type
-            ds = load(parameter_filename, **self.load_kwargs)
-            if ds.cosmological_simulation:
-                redshift = ds.current_redshift
+            if isinstance(self.parameter_filename, Dataset):
+                self.ds = self.parameter_filename
+                self.parameter_filename = self.ds.basename
+            elif isinstance(self.parameter_filename, str):
+                self.ds = load(self.parameter_filename, **self.load_kwargs)
+            if self.ds.cosmological_simulation:
+                redshift = self.ds.current_redshift
                 self.cosmology = Cosmology(
-                    hubble_constant=ds.hubble_constant,
-                    omega_matter=ds.omega_matter,
-                    omega_lambda=ds.omega_lambda,
-                    unit_registry=ds.unit_registry)
+                    hubble_constant=self.ds.hubble_constant,
+                    omega_matter=self.ds.omega_matter,
+                    omega_lambda=self.ds.omega_lambda,
+                    unit_registry=self.ds.unit_registry)
             else:
                 redshift = 0.
-            self.light_ray_solution.append({"filename": parameter_filename,
+            self.light_ray_solution.append({"filename": self.parameter_filename,
                                             "redshift": redshift})
 
-        # Make a light ray from a simulation time-series.
+        # Make a light ray from a simulation time-series. #3
         else:
+            self.ds = None
+            assert isinstance(self.parameter_filename, str)
             # Get list of datasets for light ray solution.
-            CosmologySplice.__init__(self, parameter_filename, simulation_type,
+            CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
               self.create_cosmology_splice(self.near_redshift, self.far_redshift,
@@ -270,7 +280,7 @@
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
         the ray and the path length of the ray through those cells.
-        Light ray data can be written out to an hdf5 file.
+        Light ray data must be written out to an hdf5 file.
 
         Parameters
         ----------
@@ -383,8 +393,12 @@
                                                        storage=all_ray_storage,
                                                        njobs=njobs):
 
-            # Load dataset for segment.
-            ds = load(my_segment['filename'], **self.load_kwargs)
+            # In case of simple rays, use the already loaded dataset: self.ds, 
+            # otherwise, load dataset for segment.
+            if self.ds is None:
+                ds = load(my_segment['filename'], **self.load_kwargs)
+            else:
+                ds = self.ds
 
             my_segment['unique_identifier'] = ds.unique_identifier
             if redshift is not None:
@@ -555,7 +569,7 @@
         Write light ray data to hdf5 file.
         """
         if self.simulation_type is None:
-            ds = load(self.parameter_filename, **self.load_kwargs)
+            ds = self.ds
         else:
             ds = {}
             ds["dimensionality"] = self.simulation.dimensionality
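
For reference, here is a minimal sketch of the two invocation styles described
in the updated docstring, mirroring the new tests added below (the
enzo_cosmology_plus sample data is assumed to be available):

.. code-block:: python

   import yt
   from yt.analysis_modules.cosmological_observation.api import LightRay

   # Simple ray: pass an already loaded dataset (or its filename) and no
   # simulation_type.
   ds = yt.load("enzo_cosmology_plus/RD0009/RD0009")
   lr_simple = LightRay(ds)
   lr_simple.make_light_ray(start_position=[0., 0., 0.],
                            end_position=[1., 1., 1.],
                            fields=['temperature', 'density'],
                            data_filename='simple_ray.h5')

   # Compound ray: pass the simulation parameter file, the frontend type,
   # and a redshift interval.
   lr_compound = LightRay("enzo_cosmology_plus/AMRCosmology.enzo", 'Enzo',
                          near_redshift=0.0, far_redshift=0.03)
   lr_compound.make_light_ray(seed=1234567,
                              fields=['temperature', 'density'],
                              data_filename='compound_ray.h5')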

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- /dev/null
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -0,0 +1,92 @@
+"""
+Unit test for the light_ray analysis module
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    requires_file
+from yt.analysis_modules.cosmological_observation.api import LightRay
+import os
+import shutil
+from yt.utilities.answer_testing.framework import data_dir_load
+import tempfile
+
+COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo():
+    """
+    This test generates a cosmological light ray
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo():
+    """
+    This test generates a non-cosmological light ray
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo_from_dataset():
+    """
+    This test generates a non-cosmological light ray created from an already
+    loaded dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(COSMO_PLUS_SINGLE)
+    lr = LightRay(ds)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -176,7 +176,7 @@
     constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
-        to use this.
+        to use this. It should be given in units of solar metallicity.
         Default: None.
 
     This will create three fields:
@@ -245,7 +245,7 @@
 
     emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
@@ -253,7 +253,7 @@
 
     lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/s")
 
     def _photon_emissivity_field(field, data):
@@ -273,7 +273,7 @@
 
     phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="photons/cm**3/s")
 
     return emiss_name, lum_name, phot_name

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -65,6 +65,7 @@
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
     default_colormap = 'arbre',
+    ray_tracing_engine = 'embree',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
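
The new ``ray_tracing_engine`` option can be inspected or overridden through
yt's configuration object; a small sketch (the alternative value ``"yt"`` used
below, selecting yt's own BVH-based engine, is an assumption about the accepted
values):

.. code-block:: python

   from yt.config import ytcfg

   # the default set above is 'embree'
   print(ytcfg.get("yt", "ray_tracing_engine"))

   # assumed alternative: fall back to yt's own BVH-based engine
   ytcfg.set("yt", "ray_tracing_engine", "yt")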

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -600,7 +600,8 @@
             ftype = self._last_freq[0] or ftype
         field = (ftype, fname)
         if field == self._last_freq:
-            return self._last_finfo
+            if field not in self.field_info.field_aliases.values():
+                return self._last_finfo
         if field in self.field_info:
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]

diff -r 57bcd94ed3be5fcb6e40876efdb7eb0955d8b010 -r c85bf4e6efc17824fca33a23ebf162889e5a523f yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -225,3 +225,12 @@
             else:
                 self._last_count = mask.sum()
         return mask
+
+    def select(self, selector, source, dest, offset):
+        mask = self._get_selector_mask(selector)
+        count = self.count(selector)
+        if count == 0: return 0
+        # Note: this likely will not work with vector fields.
+        dest[offset:offset+count] = source.flat[mask]
+        return count
+

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/39587bc119b1/
Changeset:   39587bc119b1
Branch:      yt
User:        jzuhone
Date:        2016-05-16 23:30:46+00:00
Summary:     Merge
Affected #:  2 files

diff -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 -r 39587bc119b113ac335f63954f5cf0ddab20ab4a yt/utilities/lib/primitives.pxd
--- a/yt/utilities/lib/primitives.pxd
+++ b/yt/utilities/lib/primitives.pxd
@@ -32,17 +32,17 @@
 
 cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil
 
-cdef inline np.int64_t ray_triangle_intersect(const void* primitives,
-                                              const np.int64_t item,
-                                              Ray* ray) nogil
+cdef np.int64_t ray_triangle_intersect(const void* primitives,
+                                       const np.int64_t item,
+                                       Ray* ray) nogil
 
-cdef inline void triangle_centroid(const void *primitives,
-                                   const np.int64_t item,
-                                   np.float64_t[3] centroid) nogil
+cdef void triangle_centroid(const void *primitives,
+                            const np.int64_t item,
+                            np.float64_t[3] centroid) nogil
 
-cdef inline void triangle_bbox(const void *primitives,
-                               const np.int64_t item,
-                               BBox* bbox) nogil
+cdef void triangle_bbox(const void *primitives,
+                        const np.int64_t item,
+                        BBox* bbox) nogil
 
 cdef struct Patch:
     np.float64_t[8][3] v  # 8 vertices per patch
@@ -67,14 +67,14 @@
                                   cython.floating[3] ray_origin,
                                   cython.floating[3] ray_direction) nogil
     
-cdef inline np.int64_t ray_patch_intersect(const void* primitives,
-                                           const np.int64_t item,
-                                           Ray* ray) nogil
+cdef np.int64_t ray_patch_intersect(const void* primitives,
+                                    const np.int64_t item,
+                                    Ray* ray) nogil
 
-cdef inline void patch_centroid(const void *primitives,
-                                const np.int64_t item,
-                                np.float64_t[3] centroid) nogil
+cdef void patch_centroid(const void *primitives,
+                         const np.int64_t item,
+                         np.float64_t[3] centroid) nogil
 
-cdef inline void patch_bbox(const void *primitives,
-                            const np.int64_t item,
-                            BBox* bbox) nogil
+cdef void patch_bbox(const void *primitives,
+                     const np.int64_t item,
+                     BBox* bbox) nogil

diff -r c98eaac166e7878e4db6b7cbff8a547f38ae8609 -r 39587bc119b113ac335f63954f5cf0ddab20ab4a yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -35,9 +35,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline np.int64_t ray_triangle_intersect(const void* primitives,
-                                              const np.int64_t item,
-                                              Ray* ray) nogil:
+cdef np.int64_t ray_triangle_intersect(const void* primitives,
+                                       const np.int64_t item,
+                                       Ray* ray) nogil:
 # https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
 
     cdef Triangle tri = (<Triangle*> primitives)[item]
@@ -84,9 +84,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void triangle_centroid(const void *primitives,
-                                   const np.int64_t item,
-                                   np.float64_t[3] centroid) nogil:
+cdef void triangle_centroid(const void *primitives,
+                            const np.int64_t item,
+                            np.float64_t[3] centroid) nogil:
         
     cdef Triangle tri = (<Triangle*> primitives)[item]
     cdef np.int64_t i
@@ -97,9 +97,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void triangle_bbox(const void *primitives,
-                               const np.int64_t item,
-                               BBox* bbox) nogil:
+cdef void triangle_bbox(const void *primitives,
+                        const np.int64_t item,
+                        BBox* bbox) nogil:
 
     cdef Triangle tri = (<Triangle*> primitives)[item]
     cdef np.int64_t i
@@ -240,9 +240,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline np.int64_t ray_patch_intersect(const void* primitives,
-                                           const np.int64_t item,
-                                           Ray* ray) nogil:
+cdef np.int64_t ray_patch_intersect(const void* primitives,
+                                    const np.int64_t item,
+                                    Ray* ray) nogil:
 
     cdef Patch patch = (<Patch*> primitives)[item]
 
@@ -264,9 +264,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void patch_centroid(const void *primitives,
-                                const np.int64_t item,
-                                np.float64_t[3] centroid) nogil:
+cdef void patch_centroid(const void *primitives,
+                         const np.int64_t item,
+                         np.float64_t[3] centroid) nogil:
 
     cdef np.int64_t i, j
     cdef Patch patch = (<Patch*> primitives)[item]
@@ -285,9 +285,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void patch_bbox(const void *primitives,
-                            const np.int64_t item,
-                            BBox* bbox) nogil:
+cdef void patch_bbox(const void *primitives,
+                     const np.int64_t item,
+                     BBox* bbox) nogil:
 
     cdef np.int64_t i, j
     cdef Patch patch = (<Patch*> primitives)[item]


https://bitbucket.org/yt_analysis/yt/commits/a0d6a1d37388/
Changeset:   a0d6a1d37388
Branch:      yt
User:        jzuhone
Date:        2016-05-09 17:39:11+00:00
Summary:     Merge
Affected #:  46 files

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in units of solar
+   metallicity.  Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

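As a brief sketch of the pattern this paragraph describes (the field definition
itself is illustrative, not part of the change), constants imported from
``yt.units`` carry their units into the derived field:

.. code-block:: python

   import yt
   from yt.units import kboltz, mp

   def _thermal_pressure(field, data):
       # ideal-gas pressure, assuming a mean molecular weight of 0.6
       return data["density"] * kboltz * data["temperature"] / (0.6 * mp)

   yt.add_field(("gas", "my_thermal_pressure"), function=_thermal_pressure,
                units="dyne/cm**2")
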
diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -532,7 +532,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for the OWLS frontend.
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``), 
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing 
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essential`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using ``conda``.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OS X, or on a cluster that has a
+very out of date installation of OpenSSL. In both of these cases you will either
+need to install OpenSSL yourself from the system package manager or consider
+using ``INST_CONDA=1``, since conda-based installs can install the conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc)
+to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script to activate
+the yt environment in a terminal session. You must run this script in order to
+have yt properly recognized by your system.  You can either add it to your login
+script, or you must execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install latest development version of yt, you can download
 it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for development branch are built after every pull request is
 merged. In order to make sure you are running latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 Location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
-
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+.. _conda-source-build:
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy ``librockstar.so`` into the ``lib`` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+``librockstar.so`` is in your ``LD_LIBRARY_PATH``:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
-
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
 - ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
-
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+  $ pip install numpy matplotlib cython sympy
 
-.. code-block:: bash
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
-
-.. note::
-
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
-  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
-  if you are curious why ``--prefix=`` is neccessary on some systems.
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `This
+  StackOverflow discussion
+  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
+  if you are curious why ``--prefix=`` is necessary on some systems.
 
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX) Please refer to
 the ``setuptools`` documentation for the additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your ``PATH``. Some Linux distributions do not include this directory in
+the default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting the
+output:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see :ref:`nightly-conda-builds`), or you can follow the instructions above to build yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`).
 

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -394,7 +394,8 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / 
+                               resolution) ).clip(0, np.inf) ) ).astype('int')
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
             # a note to the user about which lines components are unresolved
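
The cast added above matters because the ceil/clip expression returns floats,
and a float cannot be used directly as a number of virtual bins. A small NumPy
illustration of the same expression with made-up resolution ratios:

    import numpy as np

    ratio = np.array([0.5, 3.0, 42.0])     # subgrid_resolution / resolution
    n_vbins = 10 ** np.ceil(np.log10(ratio)).clip(0, np.inf)
    print(n_vbins)                         # 1., 10., 100. -- still floats
    print(n_vbins.astype('int'))           # 1, 10, 100 -- usable bin counts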

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -176,7 +176,7 @@
     constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
-        to use this.
+        to use this. It should be given in units of solar metallicity.
         Default: None.
 
     This will create three fields:
@@ -245,7 +245,7 @@
 
     emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
@@ -253,7 +253,7 @@
 
     lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/s")
 
     def _photon_emissivity_field(field, data):
@@ -273,7 +273,7 @@
 
     phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="photons/cm**3/s")
 
     return emiss_name, lum_name, phot_name

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/extern/functools32.py
--- a/yt/extern/functools32.py
+++ /dev/null
@@ -1,423 +0,0 @@
-"""functools.py - Tools for working with functions and callable objects
-"""
-# Python module wrapper for _functools C module
-# to allow utilities written in Python to be added
-# to the functools module.
-# Written by Nick Coghlan <ncoghlan at gmail.com>
-# and Raymond Hettinger <python at rcn.com>
-#   Copyright (C) 2006-2010 Python Software Foundation.
-# See C source code for _functools credits/copyright
-
-__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
-           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
-
-from _functools import partial, reduce
-from collections import MutableMapping, namedtuple
-from .reprlib32 import recursive_repr as _recursive_repr
-from weakref import proxy as _proxy
-import sys as _sys
-try:
-    from _thread import allocate_lock as Lock
-except:
-    from ._dummy_thread32 import allocate_lock as Lock
-
-################################################################################
-### OrderedDict
-################################################################################
-
-class _Link(object):
-    __slots__ = 'prev', 'next', 'key', '__weakref__'
-
-class OrderedDict(dict):
-    'Dictionary that remembers insertion order'
-    # An inherited dict maps keys to values.
-    # The inherited dict provides __getitem__, __len__, __contains__, and get.
-    # The remaining methods are order-aware.
-    # Big-O running times for all methods are the same as regular dictionaries.
-
-    # The internal self.__map dict maps keys to links in a doubly linked list.
-    # The circular doubly linked list starts and ends with a sentinel element.
-    # The sentinel element never gets deleted (this simplifies the algorithm).
-    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
-    # The prev links are weakref proxies (to prevent circular references).
-    # Individual links are kept alive by the hard reference in self.__map.
-    # Those hard references disappear when a key is deleted from an OrderedDict.
-
-    def __init__(self, *args, **kwds):
-        '''Initialize an ordered dictionary.  The signature is the same as
-        regular dictionaries, but keyword arguments are not recommended because
-        their insertion order is arbitrary.
-
-        '''
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__root
-        except AttributeError:
-            self.__hardroot = _Link()
-            self.__root = root = _proxy(self.__hardroot)
-            root.prev = root.next = root
-            self.__map = {}
-        self.__update(*args, **kwds)
-
-    def __setitem__(self, key, value,
-                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
-        'od.__setitem__(i, y) <==> od[i]=y'
-        # Setting a new item creates a new link at the end of the linked list,
-        # and the inherited dictionary is updated with the new key/value pair.
-        if key not in self:
-            self.__map[key] = link = Link()
-            root = self.__root
-            last = root.prev
-            link.prev, link.next, link.key = last, root, key
-            last.next = link
-            root.prev = proxy(link)
-        dict_setitem(self, key, value)
-
-    def __delitem__(self, key, dict_delitem=dict.__delitem__):
-        'od.__delitem__(y) <==> del od[y]'
-        # Deleting an existing item uses self.__map to find the link which gets
-        # removed by updating the links in the predecessor and successor nodes.
-        dict_delitem(self, key)
-        link = self.__map.pop(key)
-        link_prev = link.prev
-        link_next = link.next
-        link_prev.next = link_next
-        link_next.prev = link_prev
-
-    def __iter__(self):
-        'od.__iter__() <==> iter(od)'
-        # Traverse the linked list in order.
-        root = self.__root
-        curr = root.next
-        while curr is not root:
-            yield curr.key
-            curr = curr.next
-
-    def __reversed__(self):
-        'od.__reversed__() <==> reversed(od)'
-        # Traverse the linked list in reverse order.
-        root = self.__root
-        curr = root.prev
-        while curr is not root:
-            yield curr.key
-            curr = curr.prev
-
-    def clear(self):
-        'od.clear() -> None.  Remove all items from od.'
-        root = self.__root
-        root.prev = root.next = root
-        self.__map.clear()
-        dict.clear(self)
-
-    def popitem(self, last=True):
-        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
-        Pairs are returned in LIFO order if last is true or FIFO order if false.
-
-        '''
-        if not self:
-            raise KeyError('dictionary is empty')
-        root = self.__root
-        if last:
-            link = root.prev
-            link_prev = link.prev
-            link_prev.next = root
-            root.prev = link_prev
-        else:
-            link = root.next
-            link_next = link.next
-            root.next = link_next
-            link_next.prev = root
-        key = link.key
-        del self.__map[key]
-        value = dict.pop(self, key)
-        return key, value
-
-    def move_to_end(self, key, last=True):
-        '''Move an existing element to the end (or beginning if last==False).
-
-        Raises KeyError if the element does not exist.
-        When last=True, acts like a fast version of self[key]=self.pop(key).
-
-        '''
-        link = self.__map[key]
-        link_prev = link.prev
-        link_next = link.next
-        link_prev.next = link_next
-        link_next.prev = link_prev
-        root = self.__root
-        if last:
-            last = root.prev
-            link.prev = last
-            link.next = root
-            last.next = root.prev = link
-        else:
-            first = root.next
-            link.prev = root
-            link.next = first
-            root.next = first.prev = link
-
-    def __sizeof__(self):
-        sizeof = _sys.getsizeof
-        n = len(self) + 1                       # number of links including root
-        size = sizeof(self.__dict__)            # instance dictionary
-        size += sizeof(self.__map) * 2          # internal dict and inherited dict
-        size += sizeof(self.__hardroot) * n     # link objects
-        size += sizeof(self.__root) * n         # proxy objects
-        return size
-
-    update = __update = MutableMapping.update
-    keys = MutableMapping.keys
-    values = MutableMapping.values
-    items = MutableMapping.items
-    __ne__ = MutableMapping.__ne__
-
-    __marker = object()
-
-    def pop(self, key, default=__marker):
-        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
-        value.  If key is not found, d is returned if given, otherwise KeyError
-        is raised.
-
-        '''
-        if key in self:
-            result = self[key]
-            del self[key]
-            return result
-        if default is self.__marker:
-            raise KeyError(key)
-        return default
-
-    def setdefault(self, key, default=None):
-        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
-        if key in self:
-            return self[key]
-        self[key] = default
-        return default
-
-    @_recursive_repr()
-    def __repr__(self):
-        'od.__repr__() <==> repr(od)'
-        if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
-
-    def __reduce__(self):
-        'Return state information for pickling'
-        items = [[k, self[k]] for k in self]
-        inst_dict = vars(self).copy()
-        for k in vars(OrderedDict()):
-            inst_dict.pop(k, None)
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def copy(self):
-        'od.copy() -> a shallow copy of od'
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
-        If not specified, the value defaults to None.
-
-        '''
-        self = cls()
-        for key in iterable:
-            self[key] = value
-        return self
-
-    def __eq__(self, other):
-        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
-        while comparison to a regular mapping is order-insensitive.
-
-        '''
-        if isinstance(other, OrderedDict):
-            return len(self)==len(other) and \
-                   all(p==q for p, q in zip(self.items(), other.items()))
-        return dict.__eq__(self, other)
-
-# update_wrapper() and wraps() are tools to help write
-# wrapper functions that can handle naive introspection
-
-WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
-WRAPPER_UPDATES = ('__dict__',)
-def update_wrapper(wrapper,
-                   wrapped,
-                   assigned = WRAPPER_ASSIGNMENTS,
-                   updated = WRAPPER_UPDATES):
-    """Update a wrapper function to look like the wrapped function
-
-       wrapper is the function to be updated
-       wrapped is the original function
-       assigned is a tuple naming the attributes assigned directly
-       from the wrapped function to the wrapper function (defaults to
-       functools.WRAPPER_ASSIGNMENTS)
-       updated is a tuple naming the attributes of the wrapper that
-       are updated with the corresponding attribute from the wrapped
-       function (defaults to functools.WRAPPER_UPDATES)
-    """
-    wrapper.__wrapped__ = wrapped
-    for attr in assigned:
-        try:
-            value = getattr(wrapped, attr)
-        except AttributeError:
-            pass
-        else:
-            setattr(wrapper, attr, value)
-    for attr in updated:
-        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
-    # Return the wrapper so this can be used as a decorator via partial()
-    return wrapper
-
-def wraps(wrapped,
-          assigned = WRAPPER_ASSIGNMENTS,
-          updated = WRAPPER_UPDATES):
-    """Decorator factory to apply update_wrapper() to a wrapper function
-
-       Returns a decorator that invokes update_wrapper() with the decorated
-       function as the wrapper argument and the arguments to wraps() as the
-       remaining arguments. Default arguments are as for update_wrapper().
-       This is a convenience function to simplify applying partial() to
-       update_wrapper().
-    """
-    return partial(update_wrapper, wrapped=wrapped,
-                   assigned=assigned, updated=updated)
-
-def total_ordering(cls):
-    """Class decorator that fills in missing ordering methods"""
-    convert = {
-        '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
-                   ('__le__', lambda self, other: self < other or self == other),
-                   ('__ge__', lambda self, other: not self < other)],
-        '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
-                   ('__lt__', lambda self, other: self <= other and not self == other),
-                   ('__gt__', lambda self, other: not self <= other)],
-        '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
-                   ('__ge__', lambda self, other: self > other or self == other),
-                   ('__le__', lambda self, other: not self > other)],
-        '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
-                   ('__gt__', lambda self, other: self >= other and not self == other),
-                   ('__lt__', lambda self, other: not self >= other)]
-    }
-    roots = set(dir(cls)) & set(convert)
-    if not roots:
-        raise ValueError('must define at least one ordering operation: < ><= >=')
-    root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
-    for opname, opfunc in convert[root]:
-        if opname not in roots:
-            opfunc.__name__ = opname
-            opfunc.__doc__ = getattr(int, opname).__doc__
-            setattr(cls, opname, opfunc)
-    return cls
-
-def cmp_to_key(mycmp):
-    """Convert a cmp= function into a key= function"""
-    class K(object):
-        __slots__ = ['obj']
-        def __init__(self, obj):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) < 0
-        def __gt__(self, other):
-            return mycmp(self.obj, other.obj) > 0
-        def __eq__(self, other):
-            return mycmp(self.obj, other.obj) == 0
-        def __le__(self, other):
-            return mycmp(self.obj, other.obj) <= 0
-        def __ge__(self, other):
-            return mycmp(self.obj, other.obj) >= 0
-        def __ne__(self, other):
-            return mycmp(self.obj, other.obj) != 0
-        __hash__ = None
-    return K
-
-_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
-
-def lru_cache(maxsize=100):
-    """Least-recently-used cache decorator.
-
-    If *maxsize* is set to None, the LRU features are disabled and the cache
-    can grow without bound.
-
-    Arguments to the cached function must be hashable.
-
-    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
-    f.cache_info().  Clear the cache and statistics with f.cache_clear().
-    Access the underlying function with f.__wrapped__.
-
-    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
-
-    """
-    # Users should only access the lru_cache through its public API:
-    #       cache_info, cache_clear, and f.__wrapped__
-    # The internals of the lru_cache are encapsulated for thread safety and
-    # to allow the implementation to change (including a possible C version).
-
-    def decorating_function(user_function,
-                tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
-
-        hits, misses = [0], [0]
-        kwd_mark = (object(),)          # separates positional and keyword args
-        lock = Lock()                   # needed because OrderedDict isn't threadsafe
-
-        if maxsize is None:
-            cache = dict()              # simple cache without ordering or size limit
-
-            @wraps(user_function)
-            def wrapper(*args, **kwds):
-                key = args
-                if kwds:
-                    key += kwd_mark + tuple(sorted(kwds.items()))
-                try:
-                    result = cache[key]
-                    hits[0] += 1
-                    return result
-                except KeyError:
-                    pass
-                result = user_function(*args, **kwds)
-                cache[key] = result
-                misses[0] += 1
-                return result
-        else:
-            cache = OrderedDict()           # ordered least recent to most recent
-            cache_popitem = cache.popitem
-            cache_renew = cache.move_to_end
-
-            @wraps(user_function)
-            def wrapper(*args, **kwds):
-                key = args
-                if kwds:
-                    key += kwd_mark + tuple(sorted(kwds.items()))
-                with lock:
-                    try:
-                        result = cache[key]
-                        cache_renew(key)    # record recent use of this key
-                        hits[0] += 1
-                        return result
-                    except KeyError:
-                        pass
-                result = user_function(*args, **kwds)
-                with lock:
-                    cache[key] = result     # record recent use of this key
-                    misses[0] += 1
-                    if len(cache) > maxsize:
-                        cache_popitem(0)    # purge least recently used cache entry
-                return result
-
-        def cache_info():
-            """Report cache statistics"""
-            with lock:
-                return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
-
-        def cache_clear():
-            """Clear the cache and cache statistics"""
-            with lock:
-                cache.clear()
-                hits[0] = misses[0] = 0
-
-        wrapper.cache_info = cache_info
-        wrapper.cache_clear = cache_clear
-        return wrapper
-
-    return decorating_function
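
The vendored functools32 backport deleted above provided lru_cache for
Python 2; its public surface is cache_info(), cache_clear(), and __wrapped__.
A short usage sketch of that interface, written against the standard-library
functools.lru_cache, which exposes the same API on Python 3:

    from functools import lru_cache

    @lru_cache(maxsize=128)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(20)
    print(fib.cache_info())   # hits, misses, maxsize, currsize
    fib.cache_clear()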

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -79,8 +79,8 @@
     ("TangentialVelocity",               "tangential_velocity"),
     ("CuttingPlaneVelocityX",            "cutting_plane_velocity_x"),
     ("CuttingPlaneVelocityY",            "cutting_plane_velocity_y"),
-    ("CuttingPlaneBX",                   "cutting_plane_bx"),
-    ("CuttingPlaneBy",                   "cutting_plane_by"),
+    ("CuttingPlaneBX",                   "cutting_plane_magnetic_field_x"),
+    ("CuttingPlaneBy",                   "cutting_plane_magnetic_field_y"),
     ("MeanMolecularWeight",              "mean_molecular_weight"),
     ("particle_density",                 "particle_density"),
     ("ThermalEnergy",                    "thermal_energy"),

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -54,6 +54,7 @@
     unit_system = registry.ds.unit_system
 
     create_vector_fields(registry, "velocity", unit_system["velocity"], ftype, slice_info)
+    create_vector_fields(registry, "magnetic_field", unit_system["magnetic_field"], ftype, slice_info)
 
     def _cell_mass(field, data):
         return data[ftype, "density"] * data[ftype, "cell_volume"]
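
The added create_vector_fields call above registers the same family of derived
vector fields for "magnetic_field" that already exists for "velocity". A hedged
usage sketch, assuming a dataset that carries magnetic field components (the
file name is a placeholder and the derived field name follows the velocity
naming pattern):

    import yt

    ds = yt.load("my_mhd_dataset")      # placeholder path
    ad = ds.all_data()
    # analogous to ("gas", "velocity_magnitude")
    print(ad["gas", "magnetic_field_magnitude"])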

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -25,7 +25,10 @@
 class LocalFieldInfoContainer(FieldInfoContainer):
     def add_field(self, name, function=None, **kwargs):
         if not isinstance(name, tuple):
-            name = ('gas', name)
+            if kwargs.setdefault('particle_type', False):
+                name = ('all', name)
+            else:
+                name = ('gas', name)
         override = kwargs.get("force_override", False)
         # Handle the case where the field has already been added.
         if not override and name in self:
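
With the change above, a bare string name passed to add_field is routed to the
("all", ...) particle field type when particle_type=True is given, instead of
always becoming a ("gas", ...) field. A hedged sketch of the calling pattern
this enables (the field itself is made up):

    import yt

    def _twice_particle_mass(field, data):
        return 2.0 * data["all", "particle_mass"]

    # registered as ("all", "twice_particle_mass") rather than ("gas", ...)
    yt.add_field("twice_particle_mass", function=_twice_particle_mass,
                 units="g", particle_type=True)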

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -183,13 +183,15 @@
     def _cp_vectors(ax):
         def _cp_val(field, data):
             vec = data.get_field_parameter("cp_%s_vec" % (ax))
-            bv = data.get_field_parameter("bulk_%s" % basename)
-            if bv is None: bv = np.zeros(3)
-            tr  = (data[xn] - bv[0]) * vec[0]
-            tr += (data[yn] - bv[1]) * vec[1]
-            tr += (data[zn] - bv[2]) * vec[2]
+            bv = data.get_field_parameter("bulk_%s" % basename, None)
+            if bv is None:
+                bv = data.ds.arr(np.zeros(3), data[xn].units)
+            tr  = (data[xn] - bv[0]) * vec.d[0]
+            tr += (data[yn] - bv[1]) * vec.d[1]
+            tr += (data[zn] - bv[2]) * vec.d[2]
             return tr
         return _cp_val
+
     registry.add_field((ftype, "cutting_plane_%s_x" % basename),
                        function=_cp_vectors('x'),
                        units=field_units)

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -16,7 +16,6 @@
 from yt.fields.derived_field import ValidateSpatial
 from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _astropy
-from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
 if PY3:
@@ -258,6 +257,7 @@
         self.pw.save(name=name, mpl_kwargs=mpl_kwargs)
 
     def _repr_html_(self):
+        from yt.visualization._mpl_imports import FigureCanvasAgg
         ret = ''
         for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
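
Moving the FigureCanvasAgg import inside _repr_html_ defers loading the Agg
backend until a plot is actually rendered for the notebook. A stripped-down
sketch of the same deferred-import pattern (the class here is illustrative,
not the yt one):

    import base64
    from io import BytesIO

    class PlotHTMLRepr(object):
        def __init__(self, figure):
            self.figure = figure

        def _repr_html_(self):
            # deferred import: the backend is only pulled in when HTML is requested
            from matplotlib.backends.backend_agg import FigureCanvasAgg
            canvas = FigureCanvasAgg(self.figure)
            buf = BytesIO()
            canvas.print_figure(buf)
            img = base64.b64encode(buf.getvalue()).decode()
            return '<img src="data:image/png;base64,%s"><br>' % img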

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -195,12 +195,26 @@
         self.particle_filename = particle_filename
 
         if self.particle_filename is None:
-            self._particle_handle = self._handle
+            # try to guess the particle filename
+            try:
+                self._particle_handle = HDF5FileHandler(filename.replace('plt_cnt', 'part'))
+                self.particle_filename = filename.replace('plt_cnt', 'part')
+                mylog.info('Particle file found: %s' % self.particle_filename.split('/')[-1])
+            except IOError:
+                self._particle_handle = self._handle
         else:
+            # particle_filename is specified by user
             try:
                 self._particle_handle = HDF5FileHandler(self.particle_filename)
             except:
                 raise IOError(self.particle_filename)
+        # Check if the particle file has the same time
+        if self._particle_handle != self._handle:
+            part_time = self._particle_handle.handle.get('real scalars')[0][1]
+            plot_time = self._handle.handle.get('real scalars')[0][1]
+            if not np.isclose(part_time, plot_time):
+                raise IOError('%s and %s are not at the same time.' % (self.particle_filename, filename))
+
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
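
With the hunk above, loading a FLASH plotfile also looks for a matching
particle file by swapping 'plt_cnt' for 'part' in the filename, and refuses to
pair files whose times disagree. A hedged usage sketch (the file names are
placeholders):

    import yt

    # picks up sedov_hdf5_part_0003 automatically if it sits next to the plotfile
    ds = yt.load("sedov_hdf5_plt_cnt_0003")

    # or point at a particular particle file explicitly
    ds = yt.load("sedov_hdf5_plt_cnt_0003",
                 particle_filename="sedov_hdf5_part_0003")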

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/http_stream/data_structures.py
--- a/yt/frontends/http_stream/data_structures.py
+++ b/yt/frontends/http_stream/data_structures.py
@@ -15,6 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import json
 import numpy as np
 import time
 
@@ -24,15 +25,11 @@
     ParticleDataset
 from yt.frontends.sph.fields import \
     SPHFieldInfo
+from yt.funcs import \
+    get_requests
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 
-try:
-    import requests
-    import json
-except ImportError:
-    requests = None
-
 class HTTPParticleFile(ParticleFile):
     pass
 
@@ -49,8 +46,9 @@
                  dataset_type = "http_particle_stream",
                  n_ref = 64, over_refine_factor=1, 
                  unit_system="cgs"):
-        if requests is None:
-            raise RuntimeError
+        if get_requests() is None:
+            raise ImportError(
+                "This functionality depends on the requests package")
         self.base_url = base_url
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
@@ -66,6 +64,7 @@
         self.parameters["HydroMethod"] = "sph"
 
         # Here's where we're going to grab the JSON index file
+        requests = get_requests()
         hreq = requests.get(self.base_url + "/yt_index.json")
         if hreq.status_code != 200:
             raise RuntimeError
@@ -108,6 +107,9 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].startswith("http://"):
             return False
+        requests = get_requests()
+        if requests is None:
+            return False
         hreq = requests.get(args[0] + "/yt_index.json")
         if hreq.status_code == 200:
             return True

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/http_stream/io.py
--- a/yt/frontends/http_stream/io.py
+++ b/yt/frontends/http_stream/io.py
@@ -18,24 +18,21 @@
 import numpy as np
 
 from yt.funcs import \
+    get_requests, \
     mylog
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 
-try:
-    import requests
-except ImportError:
-    requests = None
-
 class IOHandlerHTTPStream(BaseIOHandler):
     _dataset_type = "http_particle_stream"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     def __init__(self, ds):
-        if requests is None:
-            raise RuntimeError
+        if get_requests() is None:
+            raise ImportError(
+                "This functionality depends on the requests package")
         self._url = ds.base_url
         # This should eventually manage the IO and cache it
         self.total_bytes = 0
@@ -47,6 +44,7 @@
         s = "%s/%s/%s/%s" % (self._url,
             data_file.file_id, ftype, fname)
         mylog.info("Loading URL %s", s)
+        requests = get_requests()
         resp = requests.get(s)
         if resp.status_code != 200:
             raise RuntimeError
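
Both http_stream hunks above replace a module-level try/except import of
requests with a get_requests() helper from yt.funcs, so the optional dependency
is only probed when it is actually needed. The helper's body is not shown in
this changeset; a sketch of what such a lazy-import helper typically looks
like:

    def get_requests():
        # return the requests module if it is importable, otherwise None
        try:
            import requests
        except ImportError:
            requests = None
        return requests

    requests = get_requests()
    if requests is None:
        raise ImportError("This functionality depends on the requests package")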

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/owls/fields.py
--- a/yt/frontends/owls/fields.py
+++ b/yt/frontends/owls/fields.py
@@ -222,41 +222,40 @@
         """ returns a function that calculates the ion density of a particle. 
         """ 
 
-        def _ion_density(field, data):
-
-            # get element symbol from ion string. ion string will 
-            # be a member of the tuple _ions (i.e. si13)
-            #--------------------------------------------------------
-            if ion[0:2].isalpha():
-                symbol = ion[0:2].capitalize()
-            else:
-                symbol = ion[0:1].capitalize()
-
-            # mass fraction for the element
-            #--------------------------------------------------------
-            m_frac = data[ftype, symbol+"_fraction"]
+        def get_owls_ion_density_field(ion, ftype, itab):
+            def _func(field, data):
 
-            # get nH and T for lookup
-            #--------------------------------------------------------
-            log_nH = np.log10( data["PartType0", "H_number_density"] )
-            log_T = np.log10( data["PartType0", "Temperature"] )
-
-            # get name of owls_ion_file for given ion
-            #--------------------------------------------------------
-            owls_ion_path = self._get_owls_ion_data_dir()
-            fname = os.path.join( owls_ion_path, ion+".hdf5" )
+                # get element symbol from ion string. ion string will 
+                # be a member of the tuple _ions (i.e. si13)
+                #--------------------------------------------------------
+                if ion[0:2].isalpha():
+                    symbol = ion[0:2].capitalize()
+                else:
+                    symbol = ion[0:1].capitalize()
 
-            # create ionization table for this redshift
-            #--------------------------------------------------------
-            itab = oit.IonTableOWLS( fname )
-            itab.set_iz( data.ds.current_redshift )
+                # mass fraction for the element
+                #--------------------------------------------------------
+                m_frac = data[ftype, symbol+"_fraction"]
 
-            # find ion balance using log nH and log T
-            #--------------------------------------------------------
-            i_frac = itab.interp( log_nH, log_T )
-            return data[ftype,"Density"] * m_frac * i_frac 
-        
-        return _ion_density
+                # get nH and T for lookup
+                #--------------------------------------------------------
+                log_nH = np.log10( data["PartType0", "H_number_density"] )
+                log_T = np.log10( data["PartType0", "Temperature"] )
+
+                # get name of owls_ion_file for given ion
+                #--------------------------------------------------------
+                itab.set_iz( data.ds.current_redshift )
+
+                # find ion balance using log nH and log T
+                #--------------------------------------------------------
+                i_frac = itab.interp( log_nH, log_T )
+                return data[ftype,"Density"] * m_frac * i_frac 
+            return _func
+            
+        ion_path = self._get_owls_ion_data_dir()
+        fname = os.path.join( ion_path, ion+".hdf5" )
+        itab = oit.IonTableOWLS( fname )
+        return get_owls_ion_density_field(ion, ftype, itab)
 
 
 

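For readers skimming the owls/fields.py hunk above: the rewrite moves the HDF5 ion-table construction (``IonTableOWLS``) out of the per-call field function and captures the table in a closure, so the file is read once per field rather than on every evaluation. A minimal standalone sketch of that closure pattern, with toy stand-in names rather than the yt field API:

import numpy as np

def make_ion_density_field(itab):
    # the table is built once, outside the per-call field function
    def _ion_density(field, data):
        # only the cheap per-call work happens here
        return data["density"] * itab["ion_fraction"]
    return _ion_density

itab = {"ion_fraction": 0.1}                     # stands in for IonTableOWLS(fname)
data = {"density": np.array([1.0, 2.0, 3.0])}    # stands in for the yt data object
func = make_ion_density_field(itab)
print(func(None, data))                          # [0.1 0.2 0.3]
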
diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/owls/owls_ion_tables.py
--- a/yt/frontends/owls/owls_ion_tables.py
+++ b/yt/frontends/owls/owls_ion_tables.py
@@ -1,8 +1,8 @@
-""" 
+"""
 OWLS ion tables
 
 A module to handle the HM01 UV background spectra and ionization data from the
-OWLS photoionization equilibrium lookup tables. 
+OWLS photoionization equilibrium lookup tables.
 
 
 
@@ -17,27 +17,28 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.on_demand_imports import _h5py as h5py
+import yt.extern.six as six
 import numpy as np
 
 
 
 
-def h5rd( fname, path, dtype=None ):
+def h5rd(fname, path, dtype=None):
     """ Read Data. Return a dataset located at <path> in file <fname> as
-    a numpy array. 
+    a numpy array.
     e.g. rd( fname, '/PartType0/Coordinates' ). """
 
     data = None
-    with h5py.File( fname, 'r' ) as h5f:
-        ds = h5f[path]
-        if dtype is None:
-            dtype = ds.dtype
-        data = np.zeros( ds.shape, dtype=dtype )
-        data = ds.value
+    fid = h5py.h5f.open(six.b(fname), h5py.h5f.ACC_RDONLY)
+    dg = h5py.h5d.open(fid, path.encode('ascii'))
+    if dtype is None:
+       dtype = dg.dtype
+    data = np.zeros(dg.shape, dtype=dtype)
+    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+    fid.close()
     return data
 
 
-
 class IonTableSpectrum:
 
     """ A class to handle the HM01 spectra in the OWLS ionization tables. """
@@ -45,17 +46,16 @@
     def __init__(self, ion_file):
 
         where = '/header/spectrum/gammahi'
-        self.GH1 = h5rd( ion_file, where ) # GH1[1/s]
+        self.GH1 = h5rd(ion_file, where) # GH1[1/s]
 
         where = '/header/spectrum/logenergy_ryd'
-        self.logryd = h5rd( ion_file, where ) # E[ryd]  
+        self.logryd = h5rd(ion_file, where) # E[ryd]
 
         where = '/header/spectrum/logflux'
-        self.logflux = h5rd( ion_file, where ) # J[ergs/s/Hz/Sr/cm^2] 
+        self.logflux = h5rd(ion_file, where) # J[ergs/s/Hz/Sr/cm^2]
 
         where = '/header/spectrum/redshift'
-        self.z = h5rd( ion_file, where ) # z
-
+        self.z = h5rd(ion_file, where) # z
 
 
     def return_table_GH1_at_z(self,z):
@@ -68,9 +68,9 @@
         else:
             i_zhi = i_zlo
             i_zlo = i_zlo - 1
-    
+
         z_frac = (z - self.z[i_zlo]) / (self.z[i_zhi] - self.z[i_zlo])
-   
+
         # find GH1 from table
         #-----------------------------------------------------------------
         logGH1_all = np.log10( self.GH1 )
@@ -80,8 +80,6 @@
         GH1_table = 10.0**logGH1_table
 
         return GH1_table
-    
-
 
 
 class IonTableOWLS:
@@ -90,7 +88,7 @@
 
     DELTA_nH = 0.25
     DELTA_T = 0.1
-    
+
     def __init__(self, ion_file):
 
         self.ion_file = ion_file
@@ -104,13 +102,13 @@
 
         # read the ionization fractions
         # linear values stored in file so take log here
-        # ionbal is the ionization balance (i.e. fraction) 
+        # ionbal is the ionization balance (i.e. fraction)
         #---------------------------------------------------------------
-        self.ionbal = h5rd( ion_file, '/ionbal' ).astype(np.float64)    
+        self.ionbal = h5rd( ion_file, '/ionbal' ).astype(np.float64)
         self.ionbal_orig = self.ionbal.copy()
 
-        ipositive = np.where( self.ionbal > 0.0 )
-        izero = np.where( self.ionbal <= 0.0 )
+        ipositive = self.ionbal > 0.0
+        izero = np.logical_not(ipositive)
         self.ionbal[izero] = self.ionbal[ipositive].min()
 
         self.ionbal = np.log10( self.ionbal )
@@ -118,7 +116,7 @@
 
         # load in background spectrum
         #---------------------------------------------------------------
-        self.spectrum = IonTableSpectrum( ion_file ) 
+        self.spectrum = IonTableSpectrum( ion_file )
 
         # calculate the spacing along each dimension
         #---------------------------------------------------------------
@@ -129,9 +127,6 @@
         self.order_str = '[log nH, log T, z]'
 
 
-            
-        
-                                                
     # sets iz and fz
     #-----------------------------------------------------
     def set_iz( self, z ):
@@ -149,11 +144,11 @@
                     self.fz = ( z - self.z[iz] ) / self.dz[iz]
                     break
 
-        
+
 
     # interpolate the table at a fixed redshift for the input
-    # values of nH and T ( input should be log ).  A simple    
-    # tri-linear interpolation is used.  
+    # values of nH and T ( input should be log ).  A simple
+    # tri-linear interpolation is used.
     #-----------------------------------------------------
     def interp( self, nH, T ):
 
@@ -162,7 +157,7 @@
 
         if nH.size != T.size:
             raise ValueError(' owls_ion_tables: array size mismatch !!! ')
-        
+
         # field discovery will have nH.size == 1 and T.size == 1
         # in that case we simply return 1.0
 
@@ -185,14 +180,14 @@
         x_T_clip = np.clip( x_T, 0.0, self.T.size-1.001 )
         fT,iT = np.modf( x_T_clip )
         iT = iT.astype( np.int32 )
-        
+
 
         # short names for previously calculated iz and fz
         #-----------------------------------------------------
         iz = self.iz
         fz = self.fz
 
-                   
+
         # calculate interpolated value
         # use tri-linear interpolation on the log values
         #-----------------------------------------------------

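One behavioral detail in the owls_ion_tables.py hunk worth calling out: zero entries in ``ionbal`` are floored to the smallest positive value before taking the log, now expressed with boolean masks rather than ``np.where`` index tuples. A small self-contained sketch of that floor-then-log step (illustrative values only):

import numpy as np

ionbal = np.array([0.0, 1.0e-3, 5.0e-2, 0.0])

ipositive = ionbal > 0.0                 # boolean masks instead of np.where tuples
izero = np.logical_not(ipositive)
ionbal[izero] = ionbal[ipositive].min()  # floor zeros to the smallest positive value

print(np.log10(ionbal))                  # safe now: no log10(0)
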
diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -27,6 +27,8 @@
     ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
+from yt.funcs import \
+    get_requests
 from .fields import \
     SDFFieldInfo
 from yt.utilities.sdf import \
@@ -34,11 +36,6 @@
     SDFIndex,\
     HTTPSDFRead
 
-try:
-    import requests
-except ImportError:
-    requests = None
-
 @contextlib.contextmanager
 def safeopen(*args, **kwargs):
     if sys.version[0] != '3':
@@ -195,7 +192,9 @@
     def _is_valid(cls, *args, **kwargs):
         sdf_header = kwargs.get('sdf_header', args[0])
         if sdf_header.startswith("http"):
-            if requests is None: return False
+            requests = get_requests()
+            if requests is None: 
+                return False
             hreq = requests.get(sdf_header, stream=True)
             if hreq.status_code != 200: return False
             # Grab a whole 4k page.

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r a0d6a1d373884712fb732c8711713043c9b65a41 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -953,3 +953,10 @@
         raise RuntimeError(
             "Please install palettable to use colorbrewer colormaps")
     return bmap.get_mpl_colormap(N=cmap[2])
+
+def get_requests():
+    try:
+        import requests
+    except ImportError:
+        requests = None
+    return requests

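The new ``get_requests`` helper lets the optional ``requests`` dependency be imported lazily, with callers raising a descriptive ImportError instead of a bare RuntimeError. A minimal sketch of the pattern as the http_stream changes above use it; ``fetch_index`` is a hypothetical caller, not yt API:

def get_requests():
    try:
        import requests
    except ImportError:
        requests = None
    return requests

def fetch_index(base_url):
    # hypothetical caller following the pattern used in the http_stream frontend
    requests = get_requests()
    if requests is None:
        raise ImportError(
            "This functionality depends on the requests package")
    resp = requests.get(base_url + "/yt_index.json")
    if resp.status_code != 200:
        raise RuntimeError("could not fetch index from %s" % base_url)
    return resp.json()
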
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/492a3be5494c/
Changeset:   492a3be5494c
Branch:      yt
User:        jzuhone
Date:        2016-05-18 18:43:23+00:00
Summary:     Merge
Affected #:  27 files

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -1,6 +1,6 @@
 stephenskory at yahoo.com = s at skory.us
 "Stephen Skory stephenskory at yahoo.com" = s at skory.us
-yuan at astro.columbia.edu = bear0980 at gmail.com
+bear0980 at gmail.com = yuan at astro.columbia.edu
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
@@ -19,7 +19,6 @@
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
 jcforbes at ucsc.edu = jforbes at ucolick.org
-ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
 fbogert = fbogert at ucsc.edu
@@ -39,4 +38,12 @@
 jnaiman at ucolick.org = jnaiman
 migueld.deval = miguel at archlinux.net
 slevy at ncsa.illinois.edu = salevy at illinois.edu
-malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file
+malzraa at gmail.com = kellerbw at mcmaster.ca
+None = convert-repo
+dfenn = df11c at my.fsu.edu
+langmm = langmm.astro at gmail.com
+jmt354 = jmtomlinson95 at gmail.com
+desika = dnarayan at haverford.edu
+Ben Thompson = bthompson2090 at gmail.com
+goldbaum at ucolick.org = ngoldbau at illinois.edu
+ngoldbau at ucsc.edu = ngoldbau at illinois.edu

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -10,6 +10,7 @@
                 Alex Bogert (fbogert at ucsc.edu)
                 André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
+                Yi-Hao Chen (yihaochentw at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
@@ -25,10 +26,12 @@
                 William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
+                David Hannasch (David.A.Hannasch at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
                 Anni Järvenpää (anni.jarvenpaa at gmail.com)
                 Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Maximilian Katz (maximilian.katz at stonybrook.edu)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
@@ -60,6 +63,7 @@
                 Anna Rosen (rosen at ucolick.org)
                 Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
+                Hsi-Yu Schive (hyschive at gmail.com)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
                 Pat Shriwise (shriwise at wisc.edu)
@@ -75,6 +79,7 @@
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
                 Robert Thompson (rthompsonj at gmail.com)
+                Joseph Tomlinson (jmtomlinson95 at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -775,32 +775,38 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+a plot file, checkpoint file, or particle file. Particle files require special handling
+depending on the situation, the main issue being that they typically lack grid information. 
+The first case is when you have a plotfile and a particle file that you would like to 
+load together. In the simplest case, this occurs automatically. For instance, if you
+were in a directory with the following files:
 
 .. code-block:: none
 
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
+   radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
+   radio_halo_1kpc_hdf5_part_0100 # particle file
 
-   import yt
-   ds = yt.load("cosmoSim_coolhdf5_chk_0026")
+where the plotfile and the particle file were created at the same time (therefore having 
+particle data consistent with the grid structure of the former). Notice also that the 
+prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
+the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
+This also works when loading a number of files in a time series.
 
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
+If the two files do not have the same prefix and number, but they nevertheless have the same
+grid structure and are at the same simulation time, the particle data may be loaded with the
+``particle_filename`` optional argument to ``yt.load``:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
+If you don't have a corresponding plotfile for a particle file but would still like to
+load the particle data, you can call ``yt.load`` on the particle file by itself. In this
+case the grid information will not be available, and the particle data will be loaded in
+a fashion similar to SPH data.
+
 .. rubric:: Caveats
 
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to

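The new documentation text describes the standalone particle-file case but does not show it; a short illustrative snippet using the file names from the example above (assumes those files are present, output not guaranteed):

import yt

# plotfile + matching particle file in the same directory: loaded together
ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100")

# particle file on its own: no grid information, treated like SPH-style data
ds_part = yt.load("radio_halo_1kpc_hdf5_part_0100")
print(ds_part.particle_types_raw)
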
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c setup.py
--- a/setup.py
+++ b/setup.py
@@ -452,7 +452,7 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES + SHADERS_FILES,
+    data_files=MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)],
     ext_modules=cython_extensions + extensions
 )
 

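Context for the setup.py fix above: setuptools' ``data_files`` expects a list of (target_directory, [files...]) tuples, so the bare shader file list had to be wrapped with its destination directory. A sketch with illustrative values (the real SHADERS_DIR/SHADERS_FILES are defined elsewhere in setup.py):

SHADERS_DIR = "yt/visualization/volume_rendering/shaders"      # assumed value
SHADERS_FILES = ["mesh.vertexshader", "mesh.fragmentshader"]    # assumed value
MAPSERVER_FILES = []

data_files = MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)]
print(data_files)
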
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -14,7 +14,7 @@
   local_fits_000:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_000:
+  local_flash_001:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_000:

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -35,10 +35,12 @@
 
 class LightRay(CosmologySplice):
     """
-    A LightRay object is a one-dimensional object representing the trajectory 
-    of a ray of light as it passes through one or more datasets (simple and
-    compound rays respectively).  One can sample any of the fields intersected
-    by the LightRay object as it passed through the dataset(s).
+    A 1D object representing the path of a light ray passing through a 
+    simulation.  LightRays can be either simple, where they pass through a 
+    single dataset, or compound, where they pass through consecutive 
+    datasets from the same cosmological simulation.  One can sample any of 
+    the fields intersected by the LightRay object as it passed through 
+    the dataset(s).
     
     For compound rays, the LightRay stacks together multiple datasets in a time
     series in order to approximate a LightRay's path through a volume

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -433,57 +433,62 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
+class OctreeSubsetBlockSlicePosition(object):
+    def __init__(self, ind, block_slice):
+        self.ind = ind
+        self.block_slice = block_slice
+        nz = self.block_slice.octree_subset.nz
+        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+
+    def __getitem__(self, key):
+        bs = self.block_slice
+        rv = bs.octree_subset[key][:,:,:,self.ind]
+        if bs.octree_subset._block_reorder:
+            rv = rv.copy(order=bs.octree_subset._block_reorder)
+        return rv
+
+    @property
+    def id(self):
+        return self.ind
+
+    @property
+    def Level(self):
+        return self.block_slice._ires[0,0,0,self.ind]
+
+    @property
+    def LeftEdge(self):
+        LE = (self.block_slice._fcoords[0,0,0,self.ind,:]
+            - self.block_slice._fwidth[0,0,0,self.ind,:]*0.5)
+        return LE
+
+    @property
+    def RightEdge(self):
+        RE = (self.block_slice._fcoords[-1,-1,-1,self.ind,:]
+            + self.block_slice._fwidth[-1,-1,-1,self.ind,:]*0.5)
+        return RE
+
+    @property
+    def dds(self):
+        return self.block_slice._fwidth[0,0,0,self.ind,:]
+
+    def clear_data(self):
+        pass
+
+    def get_vertex_centered_data(self, *args, **kwargs):
+        raise NotImplementedError
+
+
 class OctreeSubsetBlockSlice(object):
     def __init__(self, octree_subset):
-        self.ind = None
         self.octree_subset = octree_subset
         # Cache some attributes
-        nz = octree_subset.nz
-        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
         for attr in ["ires", "icoords", "fcoords", "fwidth"]:
             v = getattr(octree_subset, attr)
             setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
 
     def __iter__(self):
         for i in range(self._ires.shape[-1]):
-            self.ind = i
-            yield i, self
-
-    def clear_data(self):
-        pass
-
-    def __getitem__(self, key):
-        rv = self.octree_subset[key][:,:,:,self.ind]
-        if self.octree_subset._block_reorder:
-            rv = rv.copy(order=self.octree_subset._block_reorder)
-        return rv
-
-    def get_vertex_centered_data(self, *args, **kwargs):
-        raise NotImplementedError
-
-    @property
-    def id(self):
-        return np.random.randint(1)
-
-    @property
-    def Level(self):
-        return self._ires[0,0,0,self.ind]
-
-    @property
-    def LeftEdge(self):
-        LE = (self._fcoords[0,0,0,self.ind,:]
-            - self._fwidth[0,0,0,self.ind,:]*0.5)
-        return LE
-
-    @property
-    def RightEdge(self):
-        RE = (self._fcoords[-1,-1,-1,self.ind,:]
-            + self._fwidth[-1,-1,-1,self.ind,:]*0.5)
-        return RE
-
-    @property
-    def dds(self):
-        return self._fwidth[0,0,0,self.ind,:]
+            yield i, OctreeSubsetBlockSlicePosition(i, self)
 
 class YTPositionArray(YTArray):
     @property

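The octree_subset.py refactor above replaces a shared, mutated ``self.ind`` during iteration with an independent per-position object, so references captured inside the loop do not go stale. A toy sketch of that iteration pattern (not yt API):

class BlockPosition(object):
    """One position in the slice; holds its own index instead of sharing one."""
    def __init__(self, ind, parent):
        self.ind = ind
        self.parent = parent

    @property
    def value(self):
        return self.parent.values[self.ind]

class BlockSlice(object):
    def __init__(self, values):
        self.values = values

    def __iter__(self):
        for i in range(len(self.values)):
            yield i, BlockPosition(i, self)

blocks = [pos for _, pos in BlockSlice([10, 20, 30])]
print([b.value for b in blocks])   # [10, 20, 30] -- no stale shared index
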
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -444,6 +444,16 @@
             if "AppendActiveParticleType" in self.dataset.parameters:
                 ap_fields = self._detect_active_particle_fields()
                 field_list = list(set(field_list).union(ap_fields))
+                if not any(f[0] == 'io' for f in field_list):
+                    if 'io' in self.dataset.particle_types_raw:
+                        ptypes_raw = list(self.dataset.particle_types_raw)
+                        ptypes_raw.remove('io')
+                        self.dataset.particle_types_raw = tuple(ptypes_raw)
+
+                    if 'io' in self.dataset.particle_types:
+                        ptypes = list(self.dataset.particle_types)
+                        ptypes.remove('io')
+                        self.dataset.particle_types = tuple(ptypes)
             ptypes = self.dataset.particle_types
             ptypes_raw = self.dataset.particle_types_raw
         else:

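A compact sketch of the particle-type pruning added in the Enzo hunk above: when no ('io', ...) fields are detected alongside active particles, 'io' is dropped from both particle-type tuples (toy values, plain Python rather than the dataset object):

particle_types_raw = ('io', 'AccretingParticle')
particle_types = ('all', 'io', 'AccretingParticle')
field_list = [('AccretingParticle', 'particle_mass')]

if not any(f[0] == 'io' for f in field_list):
    particle_types_raw = tuple(p for p in particle_types_raw if p != 'io')
    particle_types = tuple(p for p in particle_types if p != 'io')

print(particle_types_raw)   # ('AccretingParticle',)
print(particle_types)       # ('all', 'AccretingParticle')
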
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -126,7 +126,9 @@
 def test_active_particle_datasets():
     two_sph = data_dir_load(two_sphere_test)
     assert 'AccretingParticle' in two_sph.particle_types_raw
-    assert_equal(len(two_sph.particle_unions), 0)
+    assert 'io' not in two_sph.particle_types_raw
+    assert 'all' in two_sph.particle_types
+    assert_equal(len(two_sph.particle_unions), 1)
     pfields = ['GridID', 'creation_time', 'dynamical_time',
                'identifier', 'level', 'metallicity', 'particle_mass']
     pfields += ['particle_position_%s' % d for d in 'xyz']

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -154,7 +154,8 @@
                                               units_override=units_override)
         self.index_filename = filename
         self.storage_filename = storage_filename
-        self.default_field = ("connect1", "diffused")
+        self.default_field = [f for f in self.field_list 
+                              if f[0] == 'connect1'][-1]
 
     def _set_code_unit_attributes(self):
         # This is where quantities are created that represent the various

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -16,12 +16,14 @@
 from .data_structures import \
       FLASHGrid, \
       FLASHHierarchy, \
-      FLASHDataset
+      FLASHDataset, \
+      FLASHParticleDataset
 
 from .fields import \
       FLASHFieldInfo
 
 from .io import \
-      IOHandlerFLASH
+      IOHandlerFLASH, \
+      IOHandlerFLASHParticle
 
 from . import tests

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -18,13 +18,15 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.funcs import mylog
 from yt.geometry.grid_geometry_handler import \
     GridIndex
-from yt.data_objects.static_output import \
-    Dataset
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.physical_ratios import cm_per_mpc
@@ -251,8 +253,6 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         self.temperature_unit = self.quan(temperature_factor, "K")
-        # Still need to deal with:
-        #self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         
     def set_code_units(self):
@@ -437,3 +437,52 @@
 
     def close(self):
         self._handle.close()
+
+class FLASHParticleFile(ParticleFile):
+    pass
+
+class FLASHParticleDataset(FLASHDataset):
+    _index_class = ParticleIndex
+    over_refine_factor = 1
+    filter_bbox = False
+    _file_class = FLASHParticleFile
+
+    def __init__(self, filename, dataset_type='flash_particle_hdf5',
+                 storage_filename = None,
+                 units_override = None,
+                 n_ref = 64, unit_system = "cgs"):
+
+        if self._handle is not None: return
+        self._handle = HDF5FileHandler(filename)
+        self.n_ref = n_ref
+        self.refine_by = 2
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.storage_filename = storage_filename
+
+    def _parse_parameter_file(self):
+        # Let the superclass do all the work but then
+        # fix the domain dimensions
+        super(FLASHParticleDataset, self)._parse_parameter_file()
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.zeros(3, "int32")
+        self.domain_dimensions[:self.dimensionality] = nz
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = HDF5FileHandler(args[0])
+            if "bounding box" not in fileh["/"].keys() \
+                and "localnp" in fileh["/"].keys():
+                return True
+        except IOError:
+            pass
+        return False
+
+    @classmethod
+    def _guess_candidates(cls, base, directories, files):
+        candidates = [_ for _ in files if "_hdf5_part_" in _]
+        # Typically, Flash won't have nested outputs.
+        return candidates, (len(candidates) == 0)

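The new ``FLASHParticleDataset._is_valid`` recognizes a standalone particle file by the presence of "localnp" and the absence of "bounding box" in the HDF5 root group. A rough standalone check mirroring that logic (uses h5py directly; the filename is a placeholder):

import h5py

def looks_like_flash_particle_file(fn):
    try:
        with h5py.File(fn, "r") as f:
            keys = f["/"].keys()
            return "bounding box" not in keys and "localnp" in keys
    except IOError:
        return False

print(looks_like_flash_particle_file("radio_halo_1kpc_hdf5_part_0100"))
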
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -20,6 +20,8 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import AlwaysSelector
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def particle_sequences(grids):
@@ -34,6 +36,16 @@
         seq = list(v[1] for v in g)
         yield seq
 
+def determine_particle_fields(handle):
+    try:
+        particle_fields = [s[0].decode("ascii","ignore").strip()
+                           for s in handle["/particle names"][:]]
+        _particle_fields = dict([("particle_" + s, i) for i, s in
+                                 enumerate(particle_fields)])
+    except KeyError:
+        _particle_fields = {}
+    return _particle_fields
+
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = False
     _dataset_type = "flash_hdf5"
@@ -43,15 +55,7 @@
         # Now we cache the particle fields
         self._handle = ds._handle
         self._particle_handle = ds._particle_handle
-        
-        try :
-            particle_fields = [s[0].decode("ascii","ignore").strip()
-                               for s in
-                               self._particle_handle["/particle names"][:]]
-            self._particle_fields = dict([("particle_" + s, i) for i, s in
-                                          enumerate(particle_fields)])
-        except KeyError:
-            self._particle_fields = {}
+        self._particle_fields = determine_particle_fields(self._particle_handle)
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -154,3 +158,96 @@
                     rv[g.id][field] = np.asarray(data[...,i], "=f8")
         return rv
 
+class IOHandlerFLASHParticle(BaseIOHandler):
+    _particle_reader = True
+    _dataset_type = "flash_particle_hdf5"
+
+    def __init__(self, ds):
+        super(IOHandlerFLASHParticle, self).__init__(ds)
+        # Now we cache the particle fields
+        self._handle = ds._handle
+        self._particle_fields = determine_particle_fields(self._handle)
+        self._position_fields = [self._particle_fields["particle_pos%s" % ax]
+                                 for ax in 'xyz']
+        self._chunksize = 32**3
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        fi = self._particle_fields[field]
+                        data = p_fields[total-count:total, fi]
+                        yield (ptype, field), data[mask]
+
+    def _initialize_index(self, data_file, regions):
+        p_fields = self._handle["/tracer particles"]
+        px, py, pz = self._position_fields
+        pcount = self._count_particles(data_file)["io"]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(self._chunksize, pcount - ind)
+            pos = np.empty((npart, 3), dtype="=f8")
+            pos[:,0] = p_fields[ind:ind+npart, px]
+            pos[:,1] = p_fields[ind:ind+npart, py]
+            pos[:,2] = p_fields[ind:ind+npart, pz]
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = \
+                compute_morton(pos[:,0], pos[:,1], pos[:,2],
+                               data_file.ds.domain_left_edge,
+                               data_file.ds.domain_right_edge)
+            ind += self._chunksize
+        return morton
+
+    def _count_particles(self, data_file):
+        pcount = {"io": self._handle["/localnp"][:].sum()}
+        return pcount
+
+    def _identify_fields(self, data_file):
+        fields = [("io", field) for field in self._particle_fields]
+        return fields, {}

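``IOHandlerFLASHParticle`` above reads the "/tracer particles" table in chunks of ``32**3`` rows rather than all at once. A self-contained sketch of that chunked-read loop, with a numpy array standing in for the HDF5 dataset:

import numpy as np

p_fields = np.random.random((100000, 4))   # stands in for "/tracer particles"
px, py, pz = 0, 1, 2                       # column indices of the position fields
pcount = p_fields.shape[0]
chunksize = 32**3

total = 0
while total < pcount:
    count = min(chunksize, pcount - total)
    x = np.asarray(p_fields[total:total + count, px], dtype="=f8")
    y = np.asarray(p_fields[total:total + count, py], dtype="=f8")
    z = np.asarray(p_fields[total:total + count, pz], dtype="=f8")
    total += count
    # in the real handler these coordinates feed the selector / Morton index
print(total)
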
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -20,8 +20,11 @@
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    data_dir_load
-from yt.frontends.flash.api import FLASHDataset
+    data_dir_load, \
+    sph_answer
+from yt.frontends.flash.api import FLASHDataset, \
+    FLASHParticleDataset
+from collections import OrderedDict
 
 _fields = ("temperature", "density", "velocity_magnitude")
 
@@ -45,7 +48,6 @@
         test_wind_tunnel.__name__ = test.description
         yield test
 
-
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
@@ -54,3 +56,28 @@
 def test_units_override():
     for test in units_override_check(sloshing):
         yield test
+
+fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
+
+fid_1to3_b1_fields = OrderedDict(
+    [
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "all_cic_velocity_x"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_y"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_z"), ("deposit", "all_cic")),
+    ]
+)
+
+
+@requires_file(fid_1to3_b1)
+def test_FLASHParticleDataset():
+    assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset)
+
+@requires_ds(fid_1to3_b1, big_data=True)
+def test_fid_1to3_b1():
+    ds = data_dir_load(fid_1to3_b1)
+    for test in sph_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields):
+        test_fid_1to3_b1.__name__ = test.description
+        yield test

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1826,5 +1826,7 @@
 
     sds._node_fields = node_data[0].keys()
     sds._elem_fields = elem_data[0].keys()
+    sds.default_field = [f for f in sds.field_list 
+                         if f[0] == 'connect1'][-1]
 
     return sds

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -318,7 +318,10 @@
         ipshell(header = __header % dd,
                 local_ns = loc, global_ns = glo)
     else:
-        from IPython.config.loader import Config
+        try:
+            from traitlets.config.loader import Config
+        except ImportError:
+            from IPython.config.loader import Config
         cfg = Config()
         cfg.InteractiveShellEmbed.local_ns = loc
         cfg.InteractiveShellEmbed.global_ns = glo

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -366,8 +366,12 @@
         """ Test unit inequality. """
         if not isinstance(u, Unit):
             return True
-        return \
-          (self.base_value != u.base_value or self.dimensions != u.dimensions)
+        if self.base_value != u.base_value:
+            return True
+        # use 'is' comparison on dimensions to avoid an expensive sympy operation
+        if self.dimensions is u.dimensions:
+            return False
+        return self.dimensions != u.dimensions
 
     def copy(self):
         return copy.deepcopy(self)
@@ -389,6 +393,9 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
+        # test first for 'is' equality to avoid expensive sympy operation
+        if self.dimensions is other_unit.dimensions:
+            return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 
     @property

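The unit_object.py change adds an object-identity fast path before the symbolic comparison. A toy sketch of that short-circuit pattern (plain Python stand-ins, not the yt Unit class):

class Dimensions(object):
    def __init__(self, expr):
        self.expr = expr
    def __eq__(self, other):
        # stands in for the expensive sympy comparison in Unit.same_dimensions_as
        return self.expr == other.expr

def same_dimensions_as(a, b):
    if a is b:       # cheap identity test first, as in the hunk above
        return True
    return a == b    # expensive path only when the objects differ

d = Dimensions("(length)/(time)")
print(same_dimensions_as(d, d))                              # True via the fast path
print(same_dimensions_as(d, Dimensions("(length)/(time)")))  # True via the slow path
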
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -174,6 +174,31 @@
 
     return other
 
+@lru_cache(maxsize=128, typed=False)
+def _unit_repr_check_same(my_units, other_units):
+    """
+    Takes a Unit object, or string of known unit symbol, and check that it
+    is compatible with this quantity. Returns Unit object.
+
+    """
+    # let Unit() handle units arg if it's not already a Unit obj.
+    if not isinstance(other_units, Unit):
+        other_units = Unit(other_units, registry=my_units.registry)
+
+    equiv_dims = em_dimensions.get(my_units.dimensions, None)
+    if equiv_dims == other_units.dimensions:
+        if current_mks in equiv_dims.free_symbols:
+            base = "SI"
+        else:
+            base = "CGS"
+        raise YTEquivalentDimsError(my_units, other_units, base)
+
+    if not my_units.same_dimensions_as(other_units):
+        raise YTUnitConversionError(
+            my_units, my_units.dimensions, other_units, other_units.dimensions)
+
+    return other_units
+
 unary_operators = (
     negative, absolute, rint, ones_like, sign, conj, exp, exp2, log, log2,
     log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
@@ -430,30 +455,6 @@
     # Start unit conversion methods
     #
 
-    def _unit_repr_check_same(self, units):
-        """
-        Takes a Unit object, or string of known unit symbol, and check that it
-        is compatible with this quantity. Returns Unit object.
-
-        """
-        # let Unit() handle units arg if it's not already a Unit obj.
-        if not isinstance(units, Unit):
-            units = Unit(units, registry=self.units.registry)
-
-        equiv_dims = em_dimensions.get(self.units.dimensions,None)
-        if equiv_dims == units.dimensions:
-            if current_mks in equiv_dims.free_symbols:
-                base = "SI"
-            else:
-                base = "CGS"
-            raise YTEquivalentDimsError(self.units, units, base)
-
-        if not self.units.same_dimensions_as(units):
-            raise YTUnitConversionError(
-                self.units, self.units.dimensions, units, units.dimensions)
-
-        return units
-
     def convert_to_units(self, units):
         """
         Convert the array and units to the given units.
@@ -464,7 +465,7 @@
             The units you want to convert to.
 
         """
-        new_units = self._unit_repr_check_same(units)
+        new_units = _unit_repr_check_same(self.units, units)
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
@@ -523,11 +524,10 @@
         YTArray
 
         """
-        new_units = self._unit_repr_check_same(units)
+        new_units = _unit_repr_check_same(self.units, units)
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
-        new_array = self * conversion_factor
-        new_array.units = new_units
+        new_array = type(self)(self.ndview * conversion_factor, new_units)
 
         if offset:
             np.subtract(new_array, offset*new_array.uq, new_array)

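The yt_array.py change turns a per-instance method into a module-level function memoized with ``lru_cache``, so repeated conversions between the same unit pair skip the validation work. A toy sketch of that refactor pattern (assumes Python 3's functools; names are illustrative):

from functools import lru_cache

@lru_cache(maxsize=128)
def check_compatible(my_units, other_units):
    # stands in for the dimension/equivalence checks; runs once per argument pair
    print("validating %s -> %s" % (my_units, other_units))
    return other_units

check_compatible("g/cm**3", "kg/m**3")   # performs the check
check_compatible("g/cm**3", "kg/m**3")   # answered from the cache, no second print
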
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -692,7 +692,10 @@
                   "\nHi there!  Welcome to yt.\n\nWe've loaded your dataset as 'ds'.  Enjoy!"
                   )
         else:
-            from IPython.config.loader import Config
+            try:
+                from traitlets.config.loader import Config
+            except ImportError:
+                from IPython.config.loader import Config
             import sys
             cfg = Config()
             # prepend sys.path with current working directory

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -438,7 +438,6 @@
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
     if best_dim == -1:
-        print 'Failed to split grid.'
         return -1
 
 
@@ -509,6 +508,11 @@
             best_dim = dim
             my_max = n_unique
             my_split = (n_unique-1)/2
+    if best_dim == -1:
+        for i in range(3):
+            free(uniquedims[i])
+        free(uniquedims)
+        return -1, 0, 0, 0
     # I recognize how lame this is.
     cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
     for i in range(my_max):

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -37,7 +37,7 @@
   {-1, -1, -1}
 };
 
-// Triangule wedges
+// Triangulate wedges
 int triangulate_wedge[MAX_NUM_TRI][3] = {
   {0, 1, 2},
   {0, 3, 1},

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -18,6 +18,7 @@
 from collections import OrderedDict
 import matplotlib.cm as cm
 import numpy as np
+import ctypes
 
 from yt.utilities.math_utils import \
     get_translate_matrix, \
@@ -29,7 +30,6 @@
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
 
-
 bbox_vertices = np.array(
       [[ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  1.],
@@ -77,6 +77,27 @@
      +1.0, +1.0, 0.0], dtype=np.float32
 )
 
+triangulate_hex = np.array([
+    [0, 2, 1], [0, 3, 2],
+    [4, 5, 6], [4, 6, 7],
+    [0, 1, 5], [0, 5, 4],
+    [1, 2, 6], [1, 6, 5],
+    [0, 7, 3], [0, 4, 7],
+    [3, 6, 2], [3, 7, 6]]
+)
+
+triangulate_tetra = np.array([
+    [0, 1, 3], [2, 3, 1],
+    [0, 3, 2], [0, 2, 1]]
+)
+
+triangulate_wedge = np.array([
+    [3, 0, 1], [4, 3, 1],
+    [2, 5, 4], [2, 4, 1],
+    [0, 3, 2], [2, 3, 5],
+    [3, 4, 5], [0, 2, 1]]
+)
+
 
 class IDVCamera(object):
     '''Camera object used in the Interactive Data Visualization
@@ -532,8 +553,180 @@
                         GL.GL_RED, GL.GL_FLOAT, n_data.T)
             GL.glGenerateMipmap(GL.GL_TEXTURE_3D)
 
+class ColorBarSceneComponent(SceneComponent):
+    ''' 
 
-class SceneGraph(SceneComponent):
+    A class for scene components that apply colorbars using a 1D texture. 
+
+    '''
+
+    def __init__(self):
+        super(ColorBarSceneComponent, self).__init__()
+        self.camera = None
+        self.cmap_texture = None
+
+    def set_camera(self, camera):
+        pass
+
+    def update_minmax(self):
+        pass
+
+    def setup_cmap_tex(self):
+        '''Creates 1D texture that will hold colormap in framebuffer'''
+        self.cmap_texture = GL.glGenTextures(1)   # create target texture
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
+        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
+        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
+                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
+
+    def update_cmap_tex(self):
+        '''Updates 1D texture with colormap that's used in framebuffer'''
+        if self.camera is None or not self.camera.cmap_new:
+            return
+
+        if self.cmap_texture is None:
+            self.setup_cmap_tex()
+
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
+                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        self.camera.cmap_new = False
+    
+class MeshSceneComponent(ColorBarSceneComponent):
+    '''
+
+    A scene component for representing unstructured mesh data.
+
+    '''
+
+    def __init__(self, data_source, field):
+        super(MeshSceneComponent, self).__init__()
+        self.set_shader("mesh.v")
+        self.set_shader("mesh.f")
+
+        self.data_source = None
+        self.redraw = True
+
+        GL.glEnable(GL.GL_DEPTH_TEST)
+        GL.glDepthFunc(GL.GL_LESS)
+        GL.glEnable(GL.GL_CULL_FACE)
+        GL.glCullFace(GL.GL_BACK)
+
+        vertices, data, indices = self.get_mesh_data(data_source, field)
+
+        self._initialize_vertex_array("mesh_info")
+        GL.glBindVertexArray(self.vert_arrays["mesh_info"])
+
+        self.add_vert_attrib("vertex_buffer", vertices, vertices.size)
+        self.add_vert_attrib("data_buffer", data, data.size)
+
+        self.vert_attrib["element_buffer"] = (GL.glGenBuffers(1), indices.size)
+        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL.GL_STATIC_DRAW)
+
+        self.transform_matrix = GL.glGetUniformLocation(self.program.program,
+                                                        "model_to_clip")
+
+        self.cmin = data.min()
+        self.cmax = data.max()
+
+    def set_camera(self, camera):
+        r""" Sets the camera orientation for the entire scene.
+
+        Parameters
+        ----------
+        camera : Camera
+
+        """
+        self.camera = camera
+        self.camera.cmap_min = float(self.cmin)
+        self.camera.cmap_max = float(self.cmax)
+        self.redraw = True
+
+    def get_mesh_data(self, data_source, field):
+        """
+        
+        This reads the mesh data into a form that can be fed into OpenGL.
+        
+        """
+
+        # get mesh information
+        ftype, fname = field
+        mesh_id = int(ftype[-1])
+        mesh = data_source.ds.index.meshes[mesh_id-1]
+        offset = mesh._index_offset
+        vertices = mesh.connectivity_coords
+        indices  = mesh.connectivity_indices - offset
+
+        # get vertex data
+        data = data_source[field]
+        vertex_data = np.zeros(vertices.shape[0], dtype=data.dtype)
+        vertex_data[indices.flatten()] = data.flatten()
+
+        if indices.shape[1] == 8:
+            tri_array = triangulate_hex
+        elif indices.shape[1] == 4:
+            tri_array = triangulate_tetra
+        elif indices.shape[1] == 6:
+            tri_array = triangulate_wedge
+        else:
+            raise NotImplementedError
+
+        tri_indices = []
+        for elem in indices:
+            for tri in tri_array:
+                tri_indices.append(elem[tri])
+        tri_indices = np.array(tri_indices)
+
+        v = vertices.astype(np.float32).flatten()
+        d = vertex_data.astype(np.float32).flatten()
+        i = tri_indices.astype(np.uint32).flatten()
+
+        return v, d, i
+
+    def run_program(self):
+        """ Renders one frame of the scene. """
+        with self.program.enable():
+
+            # Handle colormap
+            self.update_cmap_tex()
+
+            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
+            projection_matrix = self.camera.projection_matrix
+            view_matrix = self.camera.view_matrix
+            model_to_clip = np.dot(projection_matrix, view_matrix)
+            GL.glUniformMatrix4fv(self.transform_matrix, 1, True, model_to_clip)
+
+            GL.glActiveTexture(GL.GL_TEXTURE1)
+            GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+
+            self.program._set_uniform("cmap", 0)
+            self.program._set_uniform("cmap_min", self.camera.cmap_min)
+            self.program._set_uniform("cmap_max", self.camera.cmap_max)
+
+            GL.glEnableVertexAttribArray(0)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["vertex_buffer"][0])
+            GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glEnableVertexAttribArray(1)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["data_buffer"][0])
+            GL.glVertexAttribPointer(1, 1, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+            GL.glDrawElements(GL.GL_TRIANGLES, self.vert_attrib["element_buffer"][1],
+                              GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))
+
+            GL.glDisableVertexAttribArray(0)
+            GL.glDisableVertexAttribArray(1)
+
+    render = run_program
+
+
+class SceneGraph(ColorBarSceneComponent):
     """A basic OpenGL render for IDV.
 
     The SceneGraph class is the primary driver behind creating a IDV rendering.
@@ -554,8 +747,6 @@
         self.collections = []
         self.fbo = None
         self.fb_texture = None
-        self.cmap_texture = None
-        self.camera = None
         self.shader_program = None
         self.fb_shader_program = None
         self.min_val, self.max_val = 1e60, -1e60
@@ -587,36 +778,8 @@
 
         self.setup_fb(self.width, self.height)
 
-    def setup_cmap_tex(self):
-        '''Creates 1D texture that will hold colormap in framebuffer'''
-        self.cmap_texture = GL.glGenTextures(1)   # create target texture
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
-        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
-        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
-                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-
-
-    def update_cmap_tex(self):
-        '''Updates 1D texture with colormap that's used in framebuffer'''
-        if self.camera is None or not self.camera.cmap_new:
-            return
-
-        if self.cmap_texture is None:
-            self.setup_cmap_tex()
-
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
-                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-        self.camera.cmap_new = False
-
-
     def setup_fb(self, width, height):
-        '''Setups FrameBuffer that will be used as container
+        '''Sets up FrameBuffer that will be used as container
            for 1 pass of rendering'''
         # Clean up old FB and Texture
         if self.fb_texture is not None and \
@@ -670,7 +833,6 @@
         status = GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)
         assert status == GL.GL_FRAMEBUFFER_COMPLETE, status
 
-
     def add_collection(self, collection):
         r"""Adds a block collection to the scene. Collections must not overlap.
 

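The triangulation tables added to interactive_vr.py convert each unstructured element into triangles for GL_TRIANGLES rendering, as done in ``MeshSceneComponent.get_mesh_data``. A small numpy sketch of applying the hex table to toy connectivity (12 triangles per 8-node element):

import numpy as np

triangulate_hex = np.array([
    [0, 2, 1], [0, 3, 2],
    [4, 5, 6], [4, 6, 7],
    [0, 1, 5], [0, 5, 4],
    [1, 2, 6], [1, 6, 5],
    [0, 7, 3], [0, 4, 7],
    [3, 6, 2], [3, 7, 6]])

# two hex elements' vertex connectivity (toy indices)
indices = np.array([[0, 1, 2, 3, 4, 5, 6, 7],
                    [8, 9, 10, 11, 12, 13, 14, 15]])

tri_indices = []
for elem in indices:
    for tri in triangulate_hex:
        tri_indices.append(elem[tri])
tri_indices = np.array(tri_indices, dtype=np.uint32)
print(tri_indices.shape)   # (24, 3): 12 triangles per hex
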
diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -58,7 +58,8 @@
         raise ImportError("This functionality requires the cyglfw3 and PyOpenGL "
                           "packages to be installed.")
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
+    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
+        MeshSceneComponent
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -78,16 +79,23 @@
     if cam_focus is None:
         cam_focus = dobj.ds.domain_center
 
+    rc = RenderingContext(*window_size)
+
+    if hasattr(dobj.ds.index, "meshes"):
+        # unstructured mesh datasets tend to have tight
+        # domain boundaries, so do some extra padding here.
+        cam_position = 3.0*dobj.ds.domain_right_edge
+        scene = MeshSceneComponent(dobj, field)
+    else:
+        scene = SceneGraph()
+        collection = BlockCollection()
+        collection.add_data(dobj, field)
+        scene.add_collection(collection)
+
     aspect_ratio = window_size[1] / window_size[0]
     far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
     near_plane = 0.01 * far_plane
 
-    rc = RenderingContext(*window_size)
-    scene = SceneGraph()
-    collection = BlockCollection()
-    collection.add_data(dobj, field)
-    scene.add_collection(collection)
-
     c = TrackballCamera(position=cam_position, focus=cam_focus, near_plane=near_plane,
                         far_plane=far_plane, aspect_ratio=aspect_ratio)
     rc.start_loop(scene, c)

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -29,7 +29,7 @@
 class ShaderProgram(object):
     '''
     Wrapper class that compiles and links vertex and fragment shaders
-    into shader program.
+    into a shader program.
 
     Parameters
     ----------
@@ -269,3 +269,13 @@
     '''A second pass vertex shader that performs no operations on vertices'''
     _source = "passthrough.vertexshader"
     _shader_name = "passthrough.v"
+
+class MeshVertexShader(VertexShader):
+    '''A vertex shader used for unstructured mesh rendering.'''
+    _source = "mesh.vertexshader"
+    _shader_name = "mesh.v"
+
+class MeshFragmentShader(FragmentShader):
+    '''A fragment shader used for unstructured mesh rendering.'''
+    _source = "mesh.fragmentshader"
+    _shader_name = "mesh.f"

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/visualization/volume_rendering/shaders/mesh.fragmentshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
@@ -0,0 +1,17 @@
+#version 330 core
+
+in float fragmentData;
+out vec4 color;
+
+uniform sampler1D cmap;
+uniform float cmap_min;
+uniform float cmap_max;
+
+void main()
+{
+    float data = fragmentData;
+    float cm = cmap_min;
+    float cp = cmap_max;
+
+    color = texture(cmap, (data - cm) / (cp - cm));
+}
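
The fragment shader just added maps each interpolated vertex value to a color by normalizing it between cmap_min and cmap_max and sampling a 1D colormap texture. A rough numpy/matplotlib analogue of that lookup (illustrative only, not the GL code path):

import numpy as np
import matplotlib.cm as cm

cmap = cm.get_cmap("viridis")
cmap_min, cmap_max = 0.0, 10.0

data = np.array([0.0, 2.5, 5.0, 10.0])
# Same normalization the shader performs: (data - cmap_min) / (cmap_max - cmap_min)
u = (data - cmap_min) / (cmap_max - cmap_min)
colors = cmap(u)     # RGBA rows, the analogue of texture(cmap, u) in GLSL
print(colors.shape)  # (4, 4)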

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r 492a3be5494c907eb8e8a18c9888a57cfc989a1c yt/visualization/volume_rendering/shaders/mesh.vertexshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.vertexshader
@@ -0,0 +1,11 @@
+#version 330 core
+
+layout(location = 0) in vec3 vertexPosition_modelspace;
+layout(location = 1) in float vertexData;
+out float fragmentData;
+uniform mat4 model_to_clip;
+void main()
+{
+    gl_Position = model_to_clip * vec4(vertexPosition_modelspace, 1);
+    fragmentData = vertexData;
+}


https://bitbucket.org/yt_analysis/yt/commits/87b1834b9f58/
Changeset:   87b1834b9f58
Branch:      yt
User:        jzuhone
Date:        2016-05-18 18:47:44+00:00
Summary:     Merge
Affected #:  0 files



https://bitbucket.org/yt_analysis/yt/commits/d1b3901bd636/
Changeset:   d1b3901bd636
Branch:      yt
User:        jzuhone
Date:        2016-05-19 03:32:57+00:00
Summary:     Group blocks together into bigger meshes
Affected #:  2 files

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -57,12 +57,13 @@
     _index_offset = 0
     _id_offset = 0
 
-    def __init__(self, *args, **kwargs):
-        super(AthenaPPLogarithmicMesh, self).__init__(*args, **kwargs)
-
-    @property
-    def id(self):
-        return self.mesh_id
+    def __init__(self, mesh_id, filename, connectivity_indices,
+                 connectivity_coords, index, blocks, dims):
+        super(AthenaPPLogarithmicMesh, self).__init__(mesh_id, filename, 
+                                                      connectivity_indices,
+                                                      connectivity_coords, index)
+        self.mesh_blocks = blocks
+        self.mesh_dims = dims
 
 class AthenaPPLogarithmicIndex(UnstructuredIndex):
     def __init__(self, ds, dataset_type = 'athena++'):
@@ -73,30 +74,76 @@
         self.dataset_type = dataset_type
 
     def _initialize_mesh(self):
-        mylog.debug("Setting up grids.")
-        num_grids = self._handle.attrs["NumMeshBlocks"]
+        mylog.debug("Setting up meshes.")
+        num_blocks = self._handle.attrs["NumMeshBlocks"]
+        log_loc = self._handle['LogicalLocations']
+        levels = self._handle["Levels"]
+        nx, ny, nz = self._handle.attrs["MeshBlockSize"]
+        x1f = self._handle["x1f"]
+        x2f = self._handle["x2f"]
+        x3f = self._handle["x3f"]
+        nblock_x = np.max(log_loc[:,0])+1
+        nblock_y = np.max(log_loc[:,1])+1
+        nblock_z = np.max(log_loc[:,2])+1
+
+        block_grid = -np.ones((nblock_x,nblock_y,nblock_z), dtype=np.int)
+        level_grid = -np.ones((nblock_x,nblock_y,nblock_z), dtype=np.int)
+
+        for i in range(num_blocks):
+            block_grid[log_loc[i][0],log_loc[i][1],log_loc[i][2]] = i
+            level_grid[log_loc[i][0],log_loc[i][1],log_loc[i][2]] = levels[i]
+
+        block_list = list(range(num_blocks))
+        bc = []
+        for i in range(num_blocks):
+            if i in block_list:
+                ii, jj, kk = log_loc[i]
+                loc_levels = level_grid[ii:ii+2,jj:jj+2,kk:kk+2]
+                if np.unique(loc_levels).size == 1:
+                    loc_ids = block_grid[ii:ii+2,jj:jj+2,kk:kk+2]
+                    bc.append(loc_ids)
+                    [block_list.remove(k) for k in loc_ids.flat]
+                else:
+                    bc.append(np.array([i]))
+                    block_list.remove(i)
+
+        num_meshes = len(bc)
+
         self.meshes = []
-        for i in range(num_grids):
-            x = self._handle["x1f"][i,:]
-            y = self._handle["x2f"][i,:]
-            z = self._handle["x3f"][i,:]
-            nx, ny, nz = self._handle.attrs["MeshBlockSize"]+1
-            coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
+        for i in range(num_meshes):
+            if bc[i].size > 1:
+                ob = bc[i][0,0,0]
+                xb = bc[i][1,0,0]
+                yb = bc[i][0,1,0]
+                zb = bc[i][0,0,1]
+                x = np.concatenate([x1f[ob,:-1], x1f[xb,:]])
+                y = np.concatenate([x2f[ob,:-1], x2f[yb,:]])
+                z = np.concatenate([x3f[ob,:-1], x3f[zb,:]])
+            else:
+                x = x1f[bc[i],:]
+                y = x2f[bc[i],:]
+                z = x3f[bc[i],:]
+            nxm = x.size
+            nym = y.size
+            nzm = z.size
+            coords = np.zeros((nxm, nym, nzm, 3), dtype="float64", order="C")
             coords[:,:,:,0] = x[:,None,None]
             coords[:,:,:,1] = y[None,:,None]
             coords[:,:,:,2] = z[None,None,:]
-            coords.shape = (nx * ny * nz, 3)
-            cycle = np.rollaxis(np.indices((nx-1,ny-1,nz-1)), 0, 4)
-            cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+            coords.shape = (nxm * nym * nzm, 3)
+            cycle = np.rollaxis(np.indices((nxm-1,nym-1,nzm-1)), 0, 4)
+            cycle.shape = ((nxm-1)*(nym-1)*(nzm-1), 3)
             off = _cis + cycle[:, np.newaxis]
-            connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+            connectivity = ((off[:,:,0] * nym) + off[:,:,1]) * nzm + off[:,:,2]
             mesh = AthenaPPLogarithmicMesh(i,
                                            self.index_filename,
                                            connectivity,
                                            coords,
-                                           self)
+                                           self,
+                                           bc[i],
+                                           np.array([nxm-1, nym-1, nzm-1]))
             self.meshes.append(mesh)
-        mylog.debug("Done setting up grids.")
+        mylog.debug("Done setting up meshes.")
 
     def _detect_output_fields(self):
         self.field_list = [("athena++", k) for k in self.ds._field_map]
@@ -237,8 +284,10 @@
         zrat = self._handle.attrs["RootGridX3"][2]
         if xrat != 1.0 or yrat != 1.0 or zrat != 1.0:
             self._index_class = AthenaPPLogarithmicIndex
+            self.logarithmic = True
         else:
             self._index_class = AthenaPPHierarchy
+            self.logarithmic = False
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.filename = filename
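
To see the grouping step above in isolation, here is a small self-contained sketch that merges a 2x2x2 neighborhood of mesh blocks into one mesh when all of its blocks sit at the same refinement level. The toy log_loc/levels arrays are made up, and the sketch adds an explicit completeness check that the frontend code gets implicitly from the refinement structure.

import numpy as np

# Toy data: eight root-level blocks filling a 2x2x2 neighborhood, plus one
# refined block off to the side that cannot be merged (hypothetical values).
log_loc = np.array([[i, j, k] for i in range(2)
                              for j in range(2)
                              for k in range(2)] + [[2, 0, 0]])
levels = np.array([0]*8 + [1])
num_blocks = len(levels)

nblock = tuple(log_loc.max(axis=0) + 1)
block_grid = -np.ones(nblock, dtype=np.int64)
level_grid = -np.ones(nblock, dtype=np.int64)
for i in range(num_blocks):
    block_grid[tuple(log_loc[i])] = i
    level_grid[tuple(log_loc[i])] = levels[i]

remaining = set(range(num_blocks))
groups = []
for i in range(num_blocks):
    if i not in remaining:
        continue
    ii, jj, kk = log_loc[i]
    loc_ids = block_grid[ii:ii+2, jj:jj+2, kk:kk+2]
    loc_levels = level_grid[ii:ii+2, jj:jj+2, kk:kk+2]
    # Merge only when the neighborhood is complete and at a single level.
    if loc_ids.size == 8 and (loc_ids >= 0).all() and np.unique(loc_levels).size == 1:
        groups.append(loc_ids)
        remaining -= set(loc_ids.flat)
    else:
        groups.append(np.array([i]))
        remaining.remove(i)

print(len(groups))  # 2: one merged 2x2x2 mesh plus one single-block mesh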

diff -r 39587bc119b113ac335f63954f5cf0ddab20ab4a -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -57,15 +57,34 @@
             ds = f["/%s" % dname]
             ind = 0
             for chunk in chunks:
-                for gs in grid_sequences(chunk.objs):
-                    start = gs[0].id - gs[0]._id_offset
-                    end = gs[-1].id - gs[-1]._id_offset + 1
-                    data = ds[fdi,start:end,:,:,:].transpose()
-                    for i, g in enumerate(gs):
-                        ind += g.select(selector, data[...,i], rv[field], ind)
+                if self.ds.logarithmic:
+                    for mesh in chunk.objs:
+                        if mesh.mesh_blocks.size == 1:
+                            id = mesh.mesh_blocks[0]
+                            data = ds[fdi,id,:,:,:].transpose()
+                        else:
+                            nx, ny, nz = mesh.mesh_dims/2
+                            data = np.empty(mesh.mesh_dims, dtype="=f8")
+                            for i in range(2):
+                                for j in range(2):
+                                    for k in range(2):
+                                        id = mesh.mesh_blocks[i,j,k]
+                                        data[i*nx:(i+1)*nx,j*ny:(j+1)*ny,k*nz:(k+1)*nz] = \
+                                            ds[fdi,id,:,:,:].transpose()
+                        ind += mesh.select(selector, data, rv[field], ind)  # caches
+
+                else:
+                    for gs in grid_sequences(chunk.objs):
+                        start = gs[0].id - gs[0]._id_offset
+                        end = gs[-1].id - gs[-1]._id_offset + 1
+                        data = ds[fdi,start:end,:,:,:].transpose()
+                        for i, g in enumerate(gs):
+                            ind += g.select(selector, data[...,i], rv[field], ind)
         return rv
 
     def _read_chunk_data(self, chunk, fields):
+        if self.ds.logarithmic:
+            pass
         f = self._handle
         rv = {}
         for g in chunk.objs:
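
The read side then stitches the eight constituent blocks of a merged mesh back into one contiguous array. A minimal sketch of that assembly, with random data standing in for the per-block HDF5 reads (the names here are illustrative, not the frontend's API):

import numpy as np

block_dims = (4, 4, 4)                       # cells per mesh block (toy value)
mesh_dims = tuple(2 * d for d in block_dims)
mesh_blocks = np.arange(8).reshape(2, 2, 2)  # block ids filling the 2x2x2 neighborhood

def read_block(block_id):
    # Stand-in for ds[fdi, block_id, :, :, :].transpose() in the real reader.
    return np.random.RandomState(block_id).random_sample(block_dims)

nx, ny, nz = (d // 2 for d in mesh_dims)     # integer division keeps slice bounds integral
data = np.empty(mesh_dims, dtype="=f8")
for i in range(2):
    for j in range(2):
        for k in range(2):
            block_id = mesh_blocks[i, j, k]
            data[i*nx:(i+1)*nx, j*ny:(j+1)*ny, k*nz:(k+1)*nz] = read_block(block_id)

print(data.shape)  # (8, 8, 8)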


https://bitbucket.org/yt_analysis/yt/commits/1d7b5a9e3a05/
Changeset:   1d7b5a9e3a05
Branch:      yt
User:        jzuhone
Date:        2016-05-19 03:33:25+00:00
Summary:     Merge
Affected #:  27 files

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -1,6 +1,6 @@
 stephenskory at yahoo.com = s at skory.us
 "Stephen Skory stephenskory at yahoo.com" = s at skory.us
-yuan at astro.columbia.edu = bear0980 at gmail.com
+bear0980 at gmail.com = yuan at astro.columbia.edu
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
@@ -19,7 +19,6 @@
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
 jcforbes at ucsc.edu = jforbes at ucolick.org
-ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
 fbogert = fbogert at ucsc.edu
@@ -39,4 +38,12 @@
 jnaiman at ucolick.org = jnaiman
 migueld.deval = miguel at archlinux.net
 slevy at ncsa.illinois.edu = salevy at illinois.edu
-malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file
+malzraa at gmail.com = kellerbw at mcmaster.ca
+None = convert-repo
+dfenn = df11c at my.fsu.edu
+langmm = langmm.astro at gmail.com
+jmt354 = jmtomlinson95 at gmail.com
+desika = dnarayan at haverford.edu
+Ben Thompson = bthompson2090 at gmail.com
+goldbaum at ucolick.org = ngoldbau at illinois.edu
+ngoldbau at ucsc.edu = ngoldbau at illinois.edu

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -10,6 +10,7 @@
                 Alex Bogert (fbogert at ucsc.edu)
                 André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
+                Yi-Hao Chen (yihaochentw at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
@@ -25,10 +26,12 @@
                 William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
+                David Hannasch (David.A.Hannasch at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
                 Anni Järvenpää (anni.jarvenpaa at gmail.com)
                 Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Maximilian Katz (maximilian.katz at stonybrook.edu)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
@@ -60,6 +63,7 @@
                 Anna Rosen (rosen at ucolick.org)
                 Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
+                Hsi-Yu Schive (hyschive at gmail.com)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
                 Pat Shriwise (shriwise at wisc.edu)
@@ -75,6 +79,7 @@
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
                 Robert Thompson (rthompsonj at gmail.com)
+                Joseph Tomlinson (jmtomlinson95 at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -775,32 +775,38 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+a plot file, checkpoint file, or particle file. Particle files require special handling
+depending on the situation, the main issue being that they typically lack grid information. 
+The first case is when you have a plotfile and a particle file that you would like to 
+load together. In the simplest case, this occurs automatically. For instance, if you
+were in a directory with the following files:
 
 .. code-block:: none
 
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
+   radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
+   radio_halo_1kpc_hdf5_part_0100 # particle file
 
-   import yt
-   ds = yt.load("cosmoSim_coolhdf5_chk_0026")
+where the plotfile and the particle file were created at the same time (therefore having 
+particle data consistent with the grid structure of the former). Notice also that the 
+prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
+the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
+This also works when loading a number of files in a time series.
 
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
+If the two files do not have the same prefix and number, but they nevertheless have the same
+grid structure and are at the same simulation time, the particle data may be loaded with the
+``particle_filename`` optional argument to ``yt.load``:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
+If you don't have a corresponding plotfile for a particle file but would still like to
+load the particle data, you can still call ``yt.load`` on the file. In that case the
+grid information will not be available, and the particle data will be loaded in a fashion
+similar to SPH data.
+
 .. rubric:: Caveats
 
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
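
To recap the three loading modes described in the documentation change above (the filenames are the ones used in the example and assume the corresponding data on disk):

import yt

# Plotfile with a matching particle file in the same directory: the particle
# file is picked up automatically.
ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100")

# Mismatched names, but same grid structure and simulation time: pass the
# particle file explicitly.
ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100",
             particle_filename="radio_halo_1kpc_hdf5_part_0100")

# Particle file with no corresponding plotfile: load it on its own; grid
# information is unavailable and the particles are treated like SPH data.
ds = yt.load("radio_halo_1kpc_hdf5_part_0100")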

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e setup.py
--- a/setup.py
+++ b/setup.py
@@ -452,7 +452,7 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES + SHADERS_FILES,
+    data_files=MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)],
     ext_modules=cython_extensions + extensions
 )
 

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -14,7 +14,7 @@
   local_fits_000:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_000:
+  local_flash_001:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_000:

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -35,10 +35,12 @@
 
 class LightRay(CosmologySplice):
     """
-    A LightRay object is a one-dimensional object representing the trajectory 
-    of a ray of light as it passes through one or more datasets (simple and
-    compound rays respectively).  One can sample any of the fields intersected
-    by the LightRay object as it passed through the dataset(s).
+    A 1D object representing the path of a light ray passing through a 
+    simulation.  LightRays can be either simple, where they pass through a 
+    single dataset, or compound, where they pass through consecutive 
+    datasets from the same cosmological simulation.  One can sample any of 
+    the fields intersected by the LightRay object as it passes through
+    the dataset(s).
     
     For compound rays, the LightRay stacks together multiple datasets in a time
     series in order to approximate a LightRay's path through a volume

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -433,57 +433,62 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
+class OctreeSubsetBlockSlicePosition(object):
+    def __init__(self, ind, block_slice):
+        self.ind = ind
+        self.block_slice = block_slice
+        nz = self.block_slice.octree_subset.nz
+        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+
+    def __getitem__(self, key):
+        bs = self.block_slice
+        rv = bs.octree_subset[key][:,:,:,self.ind]
+        if bs.octree_subset._block_reorder:
+            rv = rv.copy(order=bs.octree_subset._block_reorder)
+        return rv
+
+    @property
+    def id(self):
+        return self.ind
+
+    @property
+    def Level(self):
+        return self.block_slice._ires[0,0,0,self.ind]
+
+    @property
+    def LeftEdge(self):
+        LE = (self.block_slice._fcoords[0,0,0,self.ind,:]
+            - self.block_slice._fwidth[0,0,0,self.ind,:]*0.5)
+        return LE
+
+    @property
+    def RightEdge(self):
+        RE = (self.block_slice._fcoords[-1,-1,-1,self.ind,:]
+            + self.block_slice._fwidth[-1,-1,-1,self.ind,:]*0.5)
+        return RE
+
+    @property
+    def dds(self):
+        return self.block_slice._fwidth[0,0,0,self.ind,:]
+
+    def clear_data(self):
+        pass
+
+    def get_vertex_centered_data(self, *args, **kwargs):
+        raise NotImplementedError
+
+
 class OctreeSubsetBlockSlice(object):
     def __init__(self, octree_subset):
-        self.ind = None
         self.octree_subset = octree_subset
         # Cache some attributes
-        nz = octree_subset.nz
-        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
         for attr in ["ires", "icoords", "fcoords", "fwidth"]:
             v = getattr(octree_subset, attr)
             setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
 
     def __iter__(self):
         for i in range(self._ires.shape[-1]):
-            self.ind = i
-            yield i, self
-
-    def clear_data(self):
-        pass
-
-    def __getitem__(self, key):
-        rv = self.octree_subset[key][:,:,:,self.ind]
-        if self.octree_subset._block_reorder:
-            rv = rv.copy(order=self.octree_subset._block_reorder)
-        return rv
-
-    def get_vertex_centered_data(self, *args, **kwargs):
-        raise NotImplementedError
-
-    @property
-    def id(self):
-        return np.random.randint(1)
-
-    @property
-    def Level(self):
-        return self._ires[0,0,0,self.ind]
-
-    @property
-    def LeftEdge(self):
-        LE = (self._fcoords[0,0,0,self.ind,:]
-            - self._fwidth[0,0,0,self.ind,:]*0.5)
-        return LE
-
-    @property
-    def RightEdge(self):
-        RE = (self._fcoords[-1,-1,-1,self.ind,:]
-            + self._fwidth[-1,-1,-1,self.ind,:]*0.5)
-        return RE
-
-    @property
-    def dds(self):
-        return self._fwidth[0,0,0,self.ind,:]
+            yield i, OctreeSubsetBlockSlicePosition(i, self)
 
 class YTPositionArray(YTArray):
     @property
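
The refactor above replaces a block slice that mutated its own ``ind`` while iterating with lightweight per-position objects, so a position saved during iteration keeps pointing at its own index afterwards. A stripped-down sketch of the pattern (generic Python, not the yt classes):

import numpy as np

class BlockSlicePosition(object):
    """Immutable view onto one position of a parent slice."""
    def __init__(self, ind, block_slice):
        self.ind = ind
        self.block_slice = block_slice

    def __getitem__(self, key):
        # Defer to the parent, always using this position's own index.
        return self.block_slice.data[key][..., self.ind]

class BlockSlice(object):
    def __init__(self, data):
        self.data = data  # field name -> array with a trailing position axis

    def __iter__(self):
        n = next(iter(self.data.values())).shape[-1]
        for i in range(n):
            yield i, BlockSlicePosition(i, self)

bs = BlockSlice({"density": np.arange(12.0).reshape(3, 4)})
positions = [pos for _, pos in bs]
# Each saved position still reads its own column after iteration has finished,
# which the old mutate-self.ind approach could not guarantee.
print(positions[0]["density"], positions[3]["density"])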

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -444,6 +444,16 @@
             if "AppendActiveParticleType" in self.dataset.parameters:
                 ap_fields = self._detect_active_particle_fields()
                 field_list = list(set(field_list).union(ap_fields))
+                if not any(f[0] == 'io' for f in field_list):
+                    if 'io' in self.dataset.particle_types_raw:
+                        ptypes_raw = list(self.dataset.particle_types_raw)
+                        ptypes_raw.remove('io')
+                        self.dataset.particle_types_raw = tuple(ptypes_raw)
+
+                    if 'io' in self.dataset.particle_types:
+                        ptypes = list(self.dataset.particle_types)
+                        ptypes.remove('io')
+                        self.dataset.particle_types = tuple(ptypes)
             ptypes = self.dataset.particle_types
             ptypes_raw = self.dataset.particle_types_raw
         else:

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -126,7 +126,9 @@
 def test_active_particle_datasets():
     two_sph = data_dir_load(two_sphere_test)
     assert 'AccretingParticle' in two_sph.particle_types_raw
-    assert_equal(len(two_sph.particle_unions), 0)
+    assert 'io' not in two_sph.particle_types_raw
+    assert 'all' in two_sph.particle_types
+    assert_equal(len(two_sph.particle_unions), 1)
     pfields = ['GridID', 'creation_time', 'dynamical_time',
                'identifier', 'level', 'metallicity', 'particle_mass']
     pfields += ['particle_position_%s' % d for d in 'xyz']

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -154,7 +154,8 @@
                                               units_override=units_override)
         self.index_filename = filename
         self.storage_filename = storage_filename
-        self.default_field = ("connect1", "diffused")
+        self.default_field = [f for f in self.field_list 
+                              if f[0] == 'connect1'][-1]
 
     def _set_code_unit_attributes(self):
         # This is where quantities are created that represent the various

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -16,12 +16,14 @@
 from .data_structures import \
       FLASHGrid, \
       FLASHHierarchy, \
-      FLASHDataset
+      FLASHDataset, \
+      FLASHParticleDataset
 
 from .fields import \
       FLASHFieldInfo
 
 from .io import \
-      IOHandlerFLASH
+      IOHandlerFLASH, \
+      IOHandlerFLASHParticle
 
 from . import tests

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -18,13 +18,15 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.funcs import mylog
 from yt.geometry.grid_geometry_handler import \
     GridIndex
-from yt.data_objects.static_output import \
-    Dataset
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.physical_ratios import cm_per_mpc
@@ -251,8 +253,6 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         self.temperature_unit = self.quan(temperature_factor, "K")
-        # Still need to deal with:
-        #self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         
     def set_code_units(self):
@@ -437,3 +437,52 @@
 
     def close(self):
         self._handle.close()
+
+class FLASHParticleFile(ParticleFile):
+    pass
+
+class FLASHParticleDataset(FLASHDataset):
+    _index_class = ParticleIndex
+    over_refine_factor = 1
+    filter_bbox = False
+    _file_class = FLASHParticleFile
+
+    def __init__(self, filename, dataset_type='flash_particle_hdf5',
+                 storage_filename = None,
+                 units_override = None,
+                 n_ref = 64, unit_system = "cgs"):
+
+        if self._handle is not None: return
+        self._handle = HDF5FileHandler(filename)
+        self.n_ref = n_ref
+        self.refine_by = 2
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.storage_filename = storage_filename
+
+    def _parse_parameter_file(self):
+        # Let the superclass do all the work but then
+        # fix the domain dimensions
+        super(FLASHParticleDataset, self)._parse_parameter_file()
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.zeros(3, "int32")
+        self.domain_dimensions[:self.dimensionality] = nz
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = HDF5FileHandler(args[0])
+            if "bounding box" not in fileh["/"].keys() \
+                and "localnp" in fileh["/"].keys():
+                return True
+        except IOError:
+            pass
+        return False
+
+    @classmethod
+    def _guess_candidates(cls, base, directories, files):
+        candidates = [_ for _ in files if "_hdf5_part_" in _]
+        # Typically, Flash won't have nested outputs.
+        return candidates, (len(candidates) == 0)

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -20,6 +20,8 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import AlwaysSelector
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def particle_sequences(grids):
@@ -34,6 +36,16 @@
         seq = list(v[1] for v in g)
         yield seq
 
+def determine_particle_fields(handle):
+    try:
+        particle_fields = [s[0].decode("ascii","ignore").strip()
+                           for s in handle["/particle names"][:]]
+        _particle_fields = dict([("particle_" + s, i) for i, s in
+                                 enumerate(particle_fields)])
+    except KeyError:
+        _particle_fields = {}
+    return _particle_fields
+
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = False
     _dataset_type = "flash_hdf5"
@@ -43,15 +55,7 @@
         # Now we cache the particle fields
         self._handle = ds._handle
         self._particle_handle = ds._particle_handle
-        
-        try :
-            particle_fields = [s[0].decode("ascii","ignore").strip()
-                               for s in
-                               self._particle_handle["/particle names"][:]]
-            self._particle_fields = dict([("particle_" + s, i) for i, s in
-                                          enumerate(particle_fields)])
-        except KeyError:
-            self._particle_fields = {}
+        self._particle_fields = determine_particle_fields(self._particle_handle)
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -154,3 +158,96 @@
                     rv[g.id][field] = np.asarray(data[...,i], "=f8")
         return rv
 
+class IOHandlerFLASHParticle(BaseIOHandler):
+    _particle_reader = True
+    _dataset_type = "flash_particle_hdf5"
+
+    def __init__(self, ds):
+        super(IOHandlerFLASHParticle, self).__init__(ds)
+        # Now we cache the particle fields
+        self._handle = ds._handle
+        self._particle_fields = determine_particle_fields(self._handle)
+        self._position_fields = [self._particle_fields["particle_pos%s" % ax]
+                                 for ax in 'xyz']
+        self._chunksize = 32**3
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        fi = self._particle_fields[field]
+                        data = p_fields[total-count:total, fi]
+                        yield (ptype, field), data[mask]
+
+    def _initialize_index(self, data_file, regions):
+        p_fields = self._handle["/tracer particles"]
+        px, py, pz = self._position_fields
+        pcount = self._count_particles(data_file)["io"]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(self._chunksize, pcount - ind)
+            pos = np.empty((npart, 3), dtype="=f8")
+            pos[:,0] = p_fields[ind:ind+npart, px]
+            pos[:,1] = p_fields[ind:ind+npart, py]
+            pos[:,2] = p_fields[ind:ind+npart, pz]
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = \
+                compute_morton(pos[:,0], pos[:,1], pos[:,2],
+                               data_file.ds.domain_left_edge,
+                               data_file.ds.domain_right_edge)
+            ind += self._chunksize
+        return morton
+
+    def _count_particles(self, data_file):
+        pcount = {"io": self._handle["/localnp"][:].sum()}
+        return pcount
+
+    def _identify_fields(self, data_file):
+        fields = [("io", field) for field in self._particle_fields]
+        return fields, {}
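
``IOHandlerFLASHParticle`` walks the ``/tracer particles`` table in fixed-size chunks instead of reading it wholesale. The chunking pattern, sketched against an in-memory array standing in for the HDF5 dataset (toy names, not the frontend's API):

import numpy as np

pcount = 100000
chunksize = 32**3  # the chunk size the handler uses
# Stand-in for the HDF5 "/tracer particles" table; columns are posx, posy, posz.
p_fields = np.random.random((pcount, 3))
px, py, pz = 0, 1, 2

def iter_particle_coords():
    total = 0
    while total < pcount:
        count = min(chunksize, pcount - total)
        x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
        y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
        z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
        total += count
        yield x, y, z

n = sum(x.size for x, y, z in iter_particle_coords())
print(n == pcount)  # True: every particle visited exactly once, a chunk at a time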

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -20,8 +20,11 @@
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    data_dir_load
-from yt.frontends.flash.api import FLASHDataset
+    data_dir_load, \
+    sph_answer
+from yt.frontends.flash.api import FLASHDataset, \
+    FLASHParticleDataset
+from collections import OrderedDict
 
 _fields = ("temperature", "density", "velocity_magnitude")
 
@@ -45,7 +48,6 @@
         test_wind_tunnel.__name__ = test.description
         yield test
 
-
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
@@ -54,3 +56,28 @@
 def test_units_override():
     for test in units_override_check(sloshing):
         yield test
+
+fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
+
+fid_1to3_b1_fields = OrderedDict(
+    [
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "all_cic_velocity_x"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_y"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_z"), ("deposit", "all_cic")),
+    ]
+)
+
+
+@requires_file(fid_1to3_b1)
+def test_FLASHParticleDataset():
+    assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset)
+
+@requires_ds(fid_1to3_b1, big_data=True)
+def test_fid_1to3_b1():
+    ds = data_dir_load(fid_1to3_b1)
+    for test in sph_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields):
+        test_fid_1to3_b1.__name__ = test.description
+        yield test

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1826,5 +1826,7 @@
 
     sds._node_fields = node_data[0].keys()
     sds._elem_fields = elem_data[0].keys()
+    sds.default_field = [f for f in sds.field_list 
+                         if f[0] == 'connect1'][-1]
 
     return sds

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -318,7 +318,10 @@
         ipshell(header = __header % dd,
                 local_ns = loc, global_ns = glo)
     else:
-        from IPython.config.loader import Config
+        try:
+            from traitlets.config.loader import Config
+        except ImportError:
+            from IPython.config.loader import Config
         cfg = Config()
         cfg.InteractiveShellEmbed.local_ns = loc
         cfg.InteractiveShellEmbed.global_ns = glo

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -366,8 +366,12 @@
         """ Test unit inequality. """
         if not isinstance(u, Unit):
             return True
-        return \
-          (self.base_value != u.base_value or self.dimensions != u.dimensions)
+        if self.base_value != u.base_value:
+            return True
+        # use 'is' comparison on dimensions to avoid an expensive sympy operation
+        if self.dimensions is u.dimensions:
+            return False
+        return self.dimensions != u.dimensions
 
     def copy(self):
         return copy.deepcopy(self)
@@ -389,6 +393,9 @@
 
     def same_dimensions_as(self, other_unit):
         """ Test if dimensions are the same. """
+        # test first for 'is' equality to avoid expensive sympy operation
+        if self.dimensions is other_unit.dimensions:
+            return True
         return (self.dimensions / other_unit.dimensions) == sympy_one
 
     @property
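
The reordered ``__ne__`` above checks the cheap float comparison first, then object identity, and only falls back to the sympy comparison when both fast paths fail. The same ordering in a standalone sketch (assumed helper function, not the Unit class itself):

import sympy

length = sympy.Symbol("(length)")
time = sympy.Symbol("(time)")

def units_differ(a_base_value, a_dims, b_base_value, b_dims):
    if a_base_value != b_base_value:
        return True           # cheap float comparison first
    if a_dims is b_dims:
        return False          # identical dimension objects cannot differ
    return a_dims != b_dims   # expensive sympy structural comparison last

dims = length / time
print(units_differ(100.0, dims, 100.0, dims))    # False: same base value and dimensions
print(units_differ(100.0, dims, 1.0, dims))      # True: base values differ
print(units_differ(100.0, dims, 100.0, length))  # True: dimensions differ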

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -174,6 +174,31 @@
 
     return other
 
+@lru_cache(maxsize=128, typed=False)
+def _unit_repr_check_same(my_units, other_units):
+    """
+    Takes a Unit object, or string of known unit symbol, and check that it
+    is compatible with this quantity. Returns Unit object.
+
+    """
+    # let Unit() handle units arg if it's not already a Unit obj.
+    if not isinstance(other_units, Unit):
+        other_units = Unit(other_units, registry=my_units.registry)
+
+    equiv_dims = em_dimensions.get(my_units.dimensions, None)
+    if equiv_dims == other_units.dimensions:
+        if current_mks in equiv_dims.free_symbols:
+            base = "SI"
+        else:
+            base = "CGS"
+        raise YTEquivalentDimsError(my_units, other_units, base)
+
+    if not my_units.same_dimensions_as(other_units):
+        raise YTUnitConversionError(
+            my_units, my_units.dimensions, other_units, other_units.dimensions)
+
+    return other_units
+
 unary_operators = (
     negative, absolute, rint, ones_like, sign, conj, exp, exp2, log, log2,
     log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
@@ -430,30 +455,6 @@
     # Start unit conversion methods
     #
 
-    def _unit_repr_check_same(self, units):
-        """
-        Takes a Unit object, or string of known unit symbol, and check that it
-        is compatible with this quantity. Returns Unit object.
-
-        """
-        # let Unit() handle units arg if it's not already a Unit obj.
-        if not isinstance(units, Unit):
-            units = Unit(units, registry=self.units.registry)
-
-        equiv_dims = em_dimensions.get(self.units.dimensions,None)
-        if equiv_dims == units.dimensions:
-            if current_mks in equiv_dims.free_symbols:
-                base = "SI"
-            else:
-                base = "CGS"
-            raise YTEquivalentDimsError(self.units, units, base)
-
-        if not self.units.same_dimensions_as(units):
-            raise YTUnitConversionError(
-                self.units, self.units.dimensions, units, units.dimensions)
-
-        return units
-
     def convert_to_units(self, units):
         """
         Convert the array and units to the given units.
@@ -464,7 +465,7 @@
             The units you want to convert to.
 
         """
-        new_units = self._unit_repr_check_same(units)
+        new_units = _unit_repr_check_same(self.units, units)
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
@@ -523,11 +524,10 @@
         YTArray
 
         """
-        new_units = self._unit_repr_check_same(units)
+        new_units = _unit_repr_check_same(self.units, units)
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
-        new_array = self * conversion_factor
-        new_array.units = new_units
+        new_array = type(self)(self.ndview * conversion_factor, new_units)
 
         if offset:
             np.subtract(new_array, offset*new_array.uq, new_array)
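
Hoisting ``_unit_repr_check_same`` to module level makes its arguments the only state it depends on, so it can be memoized with ``functools.lru_cache`` and repeated conversions between the same pair of units skip the dimension check entirely (this relies on the arguments being hashable). A toy version of the same idea, with a pretend-expensive check in place of the sympy logic:

from functools import lru_cache

CALLS = {"count": 0}

@lru_cache(maxsize=128)
def check_compatible(my_units, other_units):
    """Pretend-expensive compatibility check between two unit symbols."""
    CALLS["count"] += 1
    dims = {"km": "length", "cm": "length", "s": "time"}
    if dims[my_units] != dims[other_units]:
        raise ValueError("incompatible units: %s vs %s" % (my_units, other_units))
    return other_units

for _ in range(1000):
    check_compatible("km", "cm")  # only the first call does any real work

print(CALLS["count"])  # 1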

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -692,7 +692,10 @@
                   "\nHi there!  Welcome to yt.\n\nWe've loaded your dataset as 'ds'.  Enjoy!"
                   )
         else:
-            from IPython.config.loader import Config
+            try:
+                from traitlets.config.loader import Config
+            except ImportError:
+                from IPython.config.loader import Config
             import sys
             cfg = Config()
             # prepend sys.path with current working directory

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -438,7 +438,6 @@
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
     if best_dim == -1:
-        print 'Failed to split grid.'
         return -1
 
 
@@ -509,6 +508,11 @@
             best_dim = dim
             my_max = n_unique
             my_split = (n_unique-1)/2
+    if best_dim == -1:
+        for i in range(3):
+            free(uniquedims[i])
+        free(uniquedims)
+        return -1, 0, 0, 0
     # I recognize how lame this is.
     cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
     for i in range(my_max):

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -37,7 +37,7 @@
   {-1, -1, -1}
 };
 
-// Triangule wedges
+// Triangulate wedges
 int triangulate_wedge[MAX_NUM_TRI][3] = {
   {0, 1, 2},
   {0, 3, 1},

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -18,6 +18,7 @@
 from collections import OrderedDict
 import matplotlib.cm as cm
 import numpy as np
+import ctypes
 
 from yt.utilities.math_utils import \
     get_translate_matrix, \
@@ -29,7 +30,6 @@
     rotation_matrix_to_quaternion
 from .shader_objects import known_shaders, ShaderProgram
 
-
 bbox_vertices = np.array(
       [[ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  1.],
@@ -77,6 +77,27 @@
      +1.0, +1.0, 0.0], dtype=np.float32
 )
 
+triangulate_hex = np.array([
+    [0, 2, 1], [0, 3, 2],
+    [4, 5, 6], [4, 6, 7],
+    [0, 1, 5], [0, 5, 4],
+    [1, 2, 6], [1, 6, 5],
+    [0, 7, 3], [0, 4, 7],
+    [3, 6, 2], [3, 7, 6]]
+)
+
+triangulate_tetra = np.array([
+    [0, 1, 3], [2, 3, 1],
+    [0, 3, 2], [0, 2, 1]]
+)
+
+triangulate_wedge = np.array([
+    [3, 0, 1], [4, 3, 1],
+    [2, 5, 4], [2, 4, 1],
+    [0, 3, 2], [2, 3, 5],
+    [3, 4, 5], [0, 2, 1]]
+)
+
 
 class IDVCamera(object):
     '''Camera object used in the Interactive Data Visualization
@@ -532,8 +553,180 @@
                         GL.GL_RED, GL.GL_FLOAT, n_data.T)
             GL.glGenerateMipmap(GL.GL_TEXTURE_3D)
 
+class ColorBarSceneComponent(SceneComponent):
+    ''' 
 
-class SceneGraph(SceneComponent):
+    A class for scene components that apply colorbars using a 1D texture. 
+
+    '''
+
+    def __init__(self):
+        super(ColorBarSceneComponent, self).__init__()
+        self.camera = None
+        self.cmap_texture = None
+
+    def set_camera(self, camera):
+        pass
+
+    def update_minmax(self):
+        pass
+
+    def setup_cmap_tex(self):
+        '''Creates 1D texture that will hold colormap in framebuffer'''
+        self.cmap_texture = GL.glGenTextures(1)   # create target texture
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
+        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
+        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
+        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
+                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
+
+    def update_cmap_tex(self):
+        '''Updates 1D texture with colormap that's used in framebuffer'''
+        if self.camera is None or not self.camera.cmap_new:
+            return
+
+        if self.cmap_texture is None:
+            self.setup_cmap_tex()
+
+        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
+                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
+        self.camera.cmap_new = False
+    
+class MeshSceneComponent(ColorBarSceneComponent):
+    '''
+
+    A scene component for representing unstructured mesh data.
+
+    '''
+
+    def __init__(self, data_source, field):
+        super(MeshSceneComponent, self).__init__()
+        self.set_shader("mesh.v")
+        self.set_shader("mesh.f")
+
+        self.data_source = None
+        self.redraw = True
+
+        GL.glEnable(GL.GL_DEPTH_TEST)
+        GL.glDepthFunc(GL.GL_LESS)
+        GL.glEnable(GL.GL_CULL_FACE)
+        GL.glCullFace(GL.GL_BACK)
+
+        vertices, data, indices = self.get_mesh_data(data_source, field)
+
+        self._initialize_vertex_array("mesh_info")
+        GL.glBindVertexArray(self.vert_arrays["mesh_info"])
+
+        self.add_vert_attrib("vertex_buffer", vertices, vertices.size)
+        self.add_vert_attrib("data_buffer", data, data.size)
+
+        self.vert_attrib["element_buffer"] = (GL.glGenBuffers(1), indices.size)
+        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL.GL_STATIC_DRAW)
+
+        self.transform_matrix = GL.glGetUniformLocation(self.program.program,
+                                                        "model_to_clip")
+
+        self.cmin = data.min()
+        self.cmax = data.max()
+
+    def set_camera(self, camera):
+        r""" Sets the camera orientation for the entire scene.
+
+        Parameters
+        ----------
+        camera : Camera
+
+        """
+        self.camera = camera
+        self.camera.cmap_min = float(self.cmin)
+        self.camera.cmap_max = float(self.cmax)
+        self.redraw = True
+
+    def get_mesh_data(self, data_source, field):
+        """
+        
+        This reads the mesh data into a form that can be fed into OpenGL.
+        
+        """
+
+        # get mesh information
+        ftype, fname = field
+        mesh_id = int(ftype[-1])
+        mesh = data_source.ds.index.meshes[mesh_id-1]
+        offset = mesh._index_offset
+        vertices = mesh.connectivity_coords
+        indices  = mesh.connectivity_indices - offset
+
+        # get vertex data
+        data = data_source[field]
+        vertex_data = np.zeros(vertices.shape[0], dtype=data.dtype)
+        vertex_data[indices.flatten()] = data.flatten()
+
+        if indices.shape[1] == 8:
+            tri_array = triangulate_hex
+        elif indices.shape[1] == 4:
+            tri_array = triangulate_tetra
+        elif indices.shape[1] == 6:
+            tri_array = triangulate_wedge
+        else:
+            raise NotImplementedError
+
+        tri_indices = []
+        for elem in indices:
+            for tri in tri_array:
+                tri_indices.append(elem[tri])
+        tri_indices = np.array(tri_indices)
+
+        v = vertices.astype(np.float32).flatten()
+        d = vertex_data.astype(np.float32).flatten()
+        i = tri_indices.astype(np.uint32).flatten()
+
+        return v, d, i
+
+    def run_program(self):
+        """ Renders one frame of the scene. """
+        with self.program.enable():
+
+            # Handle colormap
+            self.update_cmap_tex()
+
+            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
+            projection_matrix = self.camera.projection_matrix
+            view_matrix = self.camera.view_matrix
+            model_to_clip = np.dot(projection_matrix, view_matrix)
+            GL.glUniformMatrix4fv(self.transform_matrix, 1, True, model_to_clip)
+
+            GL.glActiveTexture(GL.GL_TEXTURE1)
+            GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
+
+            self.program._set_uniform("cmap", 0)
+            self.program._set_uniform("cmap_min", self.camera.cmap_min)
+            self.program._set_uniform("cmap_max", self.camera.cmap_max)
+
+            GL.glEnableVertexAttribArray(0)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["vertex_buffer"][0])
+            GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glEnableVertexAttribArray(1)
+            GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vert_attrib["data_buffer"][0])
+            GL.glVertexAttribPointer(1, 1, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
+
+            GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.vert_attrib["element_buffer"][0])
+            GL.glDrawElements(GL.GL_TRIANGLES, self.vert_attrib["element_buffer"][1],
+                              GL.GL_UNSIGNED_INT, ctypes.c_void_p(0))
+
+            GL.glDisableVertexAttribArray(0)
+            GL.glDisableVertexAttribArray(1)
+
+    render = run_program
+
+
+class SceneGraph(ColorBarSceneComponent):
     """A basic OpenGL render for IDV.
 
     The SceneGraph class is the primary driver behind creating a IDV rendering.
@@ -554,8 +747,6 @@
         self.collections = []
         self.fbo = None
         self.fb_texture = None
-        self.cmap_texture = None
-        self.camera = None
         self.shader_program = None
         self.fb_shader_program = None
         self.min_val, self.max_val = 1e60, -1e60
@@ -587,36 +778,8 @@
 
         self.setup_fb(self.width, self.height)
 
-    def setup_cmap_tex(self):
-        '''Creates 1D texture that will hold colormap in framebuffer'''
-        self.cmap_texture = GL.glGenTextures(1)   # create target texture
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
-        GL.glTexParameterf(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
-        GL.glTexParameteri(GL.GL_TEXTURE_1D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
-        GL.glTexImage1D(GL.GL_TEXTURE_1D, 0, GL.GL_RGBA, 256,
-                        0, GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-
-
-    def update_cmap_tex(self):
-        '''Updates 1D texture with colormap that's used in framebuffer'''
-        if self.camera is None or not self.camera.cmap_new:
-            return
-
-        if self.cmap_texture is None:
-            self.setup_cmap_tex()
-
-        GL.glBindTexture(GL.GL_TEXTURE_1D, self.cmap_texture)
-        GL.glTexSubImage1D(GL.GL_TEXTURE_1D, 0, 0, 256,
-                           GL.GL_RGBA, GL.GL_FLOAT, self.camera.cmap)
-        GL.glBindTexture(GL.GL_TEXTURE_1D, 0)
-        self.camera.cmap_new = False
-
-
     def setup_fb(self, width, height):
-        '''Setups FrameBuffer that will be used as container
+        '''Sets up FrameBuffer that will be used as container
            for 1 pass of rendering'''
         # Clean up old FB and Texture
         if self.fb_texture is not None and \
@@ -670,7 +833,6 @@
         status = GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)
         assert status == GL.GL_FRAMEBUFFER_COMPLETE, status
 
-
     def add_collection(self, collection):
         r"""Adds a block collection to the scene. Collections must not overlap.
 

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/visualization/volume_rendering/interactive_vr_helpers.py
--- a/yt/visualization/volume_rendering/interactive_vr_helpers.py
+++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py
@@ -58,7 +58,8 @@
         raise ImportError("This functionality requires the cyglfw3 and PyOpenGL "
                           "packages to be installed.")
 
-    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera
+    from .interactive_vr import SceneGraph, BlockCollection, TrackballCamera, \
+        MeshSceneComponent
     from .interactive_loop import RenderingContext
 
     if isinstance(data_source, Dataset):
@@ -78,16 +79,23 @@
     if cam_focus is None:
         cam_focus = dobj.ds.domain_center
 
+    rc = RenderingContext(*window_size)
+
+    if hasattr(dobj.ds.index, "meshes"):
+        # unstructured mesh datasets tend to have tight
+        # domain boundaries, so do some extra padding here.
+        cam_position = 3.0*dobj.ds.domain_right_edge
+        scene = MeshSceneComponent(dobj, field)
+    else:
+        scene = SceneGraph()
+        collection = BlockCollection()
+        collection.add_data(dobj, field)
+        scene.add_collection(collection)
+
     aspect_ratio = window_size[1] / window_size[0]
     far_plane = np.linalg.norm(cam_focus - cam_position) * 2.0
     near_plane = 0.01 * far_plane
 
-    rc = RenderingContext(*window_size)
-    scene = SceneGraph()
-    collection = BlockCollection()
-    collection.add_data(dobj, field)
-    scene.add_collection(collection)
-
     c = TrackballCamera(position=cam_position, focus=cam_focus, near_plane=near_plane,
                         far_plane=far_plane, aspect_ratio=aspect_ratio)
     rc.start_loop(scene, c)

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -29,7 +29,7 @@
 class ShaderProgram(object):
     '''
     Wrapper class that compiles and links vertex and fragment shaders
-    into shader program.
+    into a shader program.
 
     Parameters
     ----------
@@ -269,3 +269,13 @@
     '''A second pass vertex shader that performs no operations on vertices'''
     _source = "passthrough.vertexshader"
     _shader_name = "passthrough.v"
+
+class MeshVertexShader(VertexShader):
+    '''A vertex shader used for unstructured mesh rendering.'''
+    _source = "mesh.vertexshader"
+    _shader_name = "mesh.v"
+
+class MeshFragmentShader(FragmentShader):
+    '''A fragment shader used for unstructured mesh rendering.'''
+    _source = "mesh.fragmentshader"
+    _shader_name = "mesh.f"

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/visualization/volume_rendering/shaders/mesh.fragmentshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.fragmentshader
@@ -0,0 +1,17 @@
+#version 330 core
+
+in float fragmentData;
+out vec4 color;
+
+uniform sampler1D cmap;
+uniform float cmap_min;
+uniform float cmap_max;
+
+void main()
+{
+    float data = fragmentData;
+    float cm = cmap_min;
+    float cp = cmap_max;
+
+    color = texture(cmap, (data - cm) / (cp - cm));
+}

diff -r d1b3901bd636a9f9ad1c7f53741a672813fa11d2 -r 1d7b5a9e3a05acd2935577d615d99c947acce17e yt/visualization/volume_rendering/shaders/mesh.vertexshader
--- /dev/null
+++ b/yt/visualization/volume_rendering/shaders/mesh.vertexshader
@@ -0,0 +1,11 @@
+#version 330 core
+
+layout(location = 0) in vec3 vertexPosition_modelspace;
+layout(location = 1) in float vertexData;
+out float fragmentData;
+uniform mat4 model_to_clip;
+void main()
+{
+    gl_Position = model_to_clip * vec4(vertexPosition_modelspace, 1);
+    fragmentData = vertexData;
+}


https://bitbucket.org/yt_analysis/yt/commits/8e8e29c17a62/
Changeset:   8e8e29c17a62
Branch:      yt
User:        jzuhone
Date:        2016-05-19 03:56:30+00:00
Summary:     Code cleanup
Affected #:  2 files

diff -r 1d7b5a9e3a05acd2935577d615d99c947acce17e -r 8e8e29c17a624584800b94a115c2618983fd154b yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -78,16 +78,13 @@
         num_blocks = self._handle.attrs["NumMeshBlocks"]
         log_loc = self._handle['LogicalLocations']
         levels = self._handle["Levels"]
-        nx, ny, nz = self._handle.attrs["MeshBlockSize"]
         x1f = self._handle["x1f"]
         x2f = self._handle["x2f"]
         x3f = self._handle["x3f"]
-        nblock_x = np.max(log_loc[:,0])+1
-        nblock_y = np.max(log_loc[:,1])+1
-        nblock_z = np.max(log_loc[:,2])+1
+        nblock = tuple(np.max(log_loc, axis=0)+1)
 
-        block_grid = -np.ones((nblock_x,nblock_y,nblock_z), dtype=np.int)
-        level_grid = -np.ones((nblock_x,nblock_y,nblock_z), dtype=np.int)
+        block_grid = np.zeros(nblock, dtype=np.int)
+        level_grid = np.zeros(nblock, dtype=np.int)
 
         for i in range(num_blocks):
             block_grid[log_loc[i][0],log_loc[i][1],log_loc[i][2]] = i
@@ -135,12 +132,8 @@
             cycle.shape = ((nxm-1)*(nym-1)*(nzm-1), 3)
             off = _cis + cycle[:, np.newaxis]
             connectivity = ((off[:,:,0] * nym) + off[:,:,1]) * nzm + off[:,:,2]
-            mesh = AthenaPPLogarithmicMesh(i,
-                                           self.index_filename,
-                                           connectivity,
-                                           coords,
-                                           self,
-                                           bc[i],
+            mesh = AthenaPPLogarithmicMesh(i, self.index_filename, connectivity,
+                                           coords, self, bc[i],
                                            np.array([nxm-1, nym-1, nzm-1]))
             self.meshes.append(mesh)
         mylog.debug("Done setting up meshes.")

diff -r 1d7b5a9e3a05acd2935577d615d99c947acce17e -r 8e8e29c17a624584800b94a115c2618983fd154b yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -63,7 +63,7 @@
                             id = mesh.mesh_blocks[0]
                             data = ds[fdi,id,:,:,:].transpose()
                         else:
-                            nx, ny, nz = mesh.mesh_dims/2
+                            nx, ny, nz = mesh.mesh_dims//2
                             data = np.empty(mesh.mesh_dims, dtype="=f8")
                             for i in range(2):
                                 for j in range(2):


https://bitbucket.org/yt_analysis/yt/commits/63bdfe83c311/
Changeset:   63bdfe83c311
Branch:      yt
User:        jzuhone
Date:        2016-05-19 04:13:50+00:00
Summary:     Rename the "athena++" dataset type to "athena_pp"; the "++" just leads to bad things everywhere
Affected #:  3 files

diff -r 8e8e29c17a624584800b94a115c2618983fd154b -r 63bdfe83c31137554aeec5a16075df2e0716fd7c yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -55,7 +55,6 @@
 
 class AthenaPPLogarithmicMesh(SemiStructuredMesh):
     _index_offset = 0
-    _id_offset = 0
 
     def __init__(self, mesh_id, filename, connectivity_indices,
                  connectivity_coords, index, blocks, dims):
@@ -66,7 +65,7 @@
         self.mesh_dims = dims
 
 class AthenaPPLogarithmicIndex(UnstructuredIndex):
-    def __init__(self, ds, dataset_type = 'athena++'):
+    def __init__(self, ds, dataset_type = 'athena_pp'):
         self._handle = ds._handle
         super(AthenaPPLogarithmicIndex, self).__init__(ds, dataset_type)
         self.index_filename = self.dataset.filename
@@ -139,7 +138,7 @@
         mylog.debug("Done setting up meshes.")
 
     def _detect_output_fields(self):
-        self.field_list = [("athena++", k) for k in self.ds._field_map]
+        self.field_list = [("athena_pp", k) for k in self.ds._field_map]
 
     def _count_selection(self, dobj, meshes = None):
         if meshes is None: meshes = dobj._chunk_info
@@ -176,10 +175,10 @@
 class AthenaPPHierarchy(GridIndex):
 
     grid = AthenaPPGrid
-    _dataset_type='athena++'
+    _dataset_type='athena_pp'
     _data_file = None
 
-    def __init__(self, ds, dataset_type='athena++'):
+    def __init__(self, ds, dataset_type='athena_pp'):
         self.dataset = weakref.proxy(ds)
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
@@ -189,7 +188,7 @@
         GridIndex.__init__(self, ds, dataset_type)
 
     def _detect_output_fields(self):
-        self.field_list = [("athena++", k) for k in self.dataset._field_map]
+        self.field_list = [("athena_pp", k) for k in self.dataset._field_map]
 
     def _count_grids(self):
         self.num_grids = self._handle.attrs["NumMeshBlocks"]
@@ -260,12 +259,12 @@
 
 class AthenaPPDataset(Dataset):
     _field_info_class = AthenaPPFieldInfo
-    _dataset_type = "athena++"
+    _dataset_type = "athena_pp"
 
-    def __init__(self, filename, dataset_type='athena++',
+    def __init__(self, filename, dataset_type='athena_pp',
                  storage_filename=None, parameters=None,
                  units_override=None, unit_system="code"):
-        self.fluid_types += ("athena++",)
+        self.fluid_types += ("athena_pp",)
         if parameters is None:
             parameters = {}
         self.specified_parameters = parameters

diff -r 8e8e29c17a624584800b94a115c2618983fd154b -r 63bdfe83c31137554aeec5a16075df2e0716fd7c yt/frontends/athena_pp/fields.py
--- a/yt/frontends/athena_pp/fields.py
+++ b/yt/frontends/athena_pp/fields.py
@@ -25,7 +25,7 @@
 
 def velocity_field(j):
     def _velocity(field, data):
-        return data["athena++", "mom%d" % j]/data["athena++","dens"]
+        return data["athena_pp", "mom%d" % j]/data["athena_pp","dens"]
     return _velocity
 
 class AthenaPPFieldInfo(FieldInfoContainer):
@@ -44,8 +44,8 @@
         # Add velocity fields
         vel_prefix = "velocity"
         for i, comp in enumerate(self.ds.coordinates.axis_order):
-            vel_field = ("athena++", "vel%d" % (i+1))
-            mom_field = ("athena++", "mom%d" % (i+1))
+            vel_field = ("athena_pp", "vel%d" % (i+1))
+            mom_field = ("athena_pp", "mom%d" % (i+1))
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
                 self.alias(("gas","%s_%s" % (vel_prefix, comp)), vel_field,
@@ -56,22 +56,22 @@
                 self.add_field(("gas","%s_%s" % (vel_prefix, comp)),
                                function=velocity_field(i+1), units=unit_system["velocity"])
         # Figure out thermal energy field
-        if ("athena++","pgas") in self.field_list:
-            self.add_output_field(("athena++","pgas"),
+        if ("athena_pp","pgas") in self.field_list:
+            self.add_output_field(("athena_pp","pgas"),
                                   units=pres_units)
-            self.alias(("gas","pressure"),("athena++","pgas"),
+            self.alias(("gas","pressure"),("athena_pp","pgas"),
                        units=unit_system["pressure"])
             def _thermal_energy(field, data):
                 return data["athena++","pgas"] / \
                        (data.ds.gamma-1.)/data["athena++","rho"]
-        elif ("athena++","Etot") in self.field_list:
+        elif ("athena_pp","Etot") in self.field_list:
             self.add_output_field(("athena++","Etot"),
                                   units=pres_units)
             def _thermal_energy(field, data):
-                eint = data["athena++", "Etot"] - data["gas","kinetic_energy"]
-                if ("athena++", "B1") in self.field_list:
+                eint = data["athena_pp", "Etot"] - data["gas","kinetic_energy"]
+                if ("athena_pp", "B1") in self.field_list:
                     eint -= data["gas","magnetic_energy"]
-                return eint/data["athena++","dens"]
+                return eint/data["athena_pp","dens"]
         self.add_field(("gas","thermal_energy"),
                        function=_thermal_energy,
                        units=unit_system["specific_energy"])
@@ -85,6 +85,6 @@
         self.add_field(("gas","temperature"), function=_temperature,
                        units=unit_system["temperature"])
 
-        setup_magnetic_field_aliases(self, "athena++", ["B%d" % ax for ax in (1,2,3)])
+        setup_magnetic_field_aliases(self, "athena_pp", ["B%d" % ax for ax in (1,2,3)])
 
 

diff -r 8e8e29c17a624584800b94a115c2618983fd154b -r 63bdfe83c31137554aeec5a16075df2e0716fd7c yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -29,7 +29,7 @@
 
 class IOHandlerAthenaPP(BaseIOHandler):
     _particle_reader = False
-    _dataset_type = "athena++"
+    _dataset_type = "athena_pp"
 
     def __init__(self, ds):
         super(IOHandlerAthenaPP, self).__init__(ds)


https://bitbucket.org/yt_analysis/yt/commits/dcf5e9d3be60/
Changeset:   dcf5e9d3be60
Branch:      yt
User:        jzuhone
Date:        2016-05-19 13:51:57+00:00
Summary:     Pretty substantial speedups in constructing the index by replacing per-block Python loops with vectorized NumPy indexing
Affected #:  1 file
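
Most of the speedup below comes from replacing the per-block Python loop with a single
fancy-indexed assignment into block_grid and level_grid. A toy sketch of the pattern
(made-up sizes, not the actual index code):

    import numpy as np

    num_blocks = 8
    log_loc = np.array([[i, j, k] for i in range(2)
                        for j in range(2) for k in range(2)])
    levels = np.zeros(num_blocks, dtype=np.int64)

    block_grid = np.zeros((2, 2, 2), dtype=np.int64)
    level_grid = np.zeros((2, 2, 2), dtype=np.int64)
    # one vectorized write replaces a loop over all blocks
    block_grid[log_loc[:, 0], log_loc[:, 1], log_loc[:, 2]] = np.arange(num_blocks)
    level_grid[log_loc[:, 0], log_loc[:, 1], log_loc[:, 2]] = levels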

diff -r 63bdfe83c31137554aeec5a16075df2e0716fd7c -r dcf5e9d3be605b05a60c2867b7505f93d8fad7c5 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -18,7 +18,7 @@
 import weakref
 
 from yt.funcs import \
-    mylog, \
+    mylog, get_pbar, \
     ensure_tuple
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -84,28 +84,27 @@
 
         block_grid = np.zeros(nblock, dtype=np.int)
         level_grid = np.zeros(nblock, dtype=np.int)
+        block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2]] = np.arange(num_blocks)
+        level_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2]] = levels[:]
 
-        for i in range(num_blocks):
-            block_grid[log_loc[i][0],log_loc[i][1],log_loc[i][2]] = i
-            level_grid[log_loc[i][0],log_loc[i][1],log_loc[i][2]] = levels[i]
-
-        block_list = list(range(num_blocks))
+        block_list = np.arange(num_blocks, dtype='int64')
         bc = []
         for i in range(num_blocks):
-            if i in block_list:
+            if block_list[i] >= 0:
                 ii, jj, kk = log_loc[i]
                 loc_levels = level_grid[ii:ii+2,jj:jj+2,kk:kk+2]
                 if np.unique(loc_levels).size == 1:
                     loc_ids = block_grid[ii:ii+2,jj:jj+2,kk:kk+2]
                     bc.append(loc_ids)
-                    [block_list.remove(k) for k in loc_ids.flat]
+                    block_list[loc_ids.flatten()] = -1
                 else:
-                    bc.append(np.array([i]))
-                    block_list.remove(i)
+                    bc.append(np.array(i))
+                    block_list[i] = -1
 
         num_meshes = len(bc)
 
         self.meshes = []
+        pbar = get_pbar("Constructing meshes", num_meshes)
         for i in range(num_meshes):
             if bc[i].size > 1:
                 ob = bc[i][0,0,0]
@@ -135,6 +134,8 @@
                                            coords, self, bc[i],
                                            np.array([nxm-1, nym-1, nzm-1]))
             self.meshes.append(mesh)
+            pbar.update(i)
+        pbar.finish()
         mylog.debug("Done setting up meshes.")
 
     def _detect_output_fields(self):


https://bitbucket.org/yt_analysis/yt/commits/e2158d70fa00/
Changeset:   e2158d70fa00
Branch:      yt
User:        jzuhone
Date:        2016-05-19 14:22:27+00:00
Summary:     In theory this should be faster: assemble 2x2x2 block groups via flat octant index lists instead of triple-nested loops
Affected #:  2 files
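
The module-level ii/jj/kk lists added to io.py below enumerate the eight octants of a
2x2x2 group of mesh blocks, so the read loop can copy eight blocks into one contiguous
array with a single flat loop instead of three nested ones. A toy sketch of that
assembly (assumed shapes, not the I/O handler itself):

    import numpy as np

    ii = [0, 1, 0, 1, 0, 1, 0, 1]
    jj = [0, 0, 1, 1, 0, 0, 1, 1]
    kk = [0, 0, 0, 0, 1, 1, 1, 1]

    nx = ny = nz = 4                        # size of one block along each axis
    blocks = np.random.rand(8, nx, ny, nz)  # eight blocks, ordered to match ii/jj/kk
    data = np.empty((2*nx, 2*ny, 2*nz))
    for n in range(8):
        data[ii[n]*nx:(ii[n]+1)*nx,
             jj[n]*ny:(jj[n]+1)*ny,
             kk[n]*nz:(kk[n]+1)*nz] = blocks[n]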

diff -r dcf5e9d3be605b05a60c2867b7505f93d8fad7c5 -r e2158d70fa00cf6508306a283e7032448385a8dd yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -94,9 +94,9 @@
                 ii, jj, kk = log_loc[i]
                 loc_levels = level_grid[ii:ii+2,jj:jj+2,kk:kk+2]
                 if np.unique(loc_levels).size == 1:
-                    loc_ids = block_grid[ii:ii+2,jj:jj+2,kk:kk+2]
+                    loc_ids = block_grid[ii:ii+2,jj:jj+2,kk:kk+2].transpose().flatten()
                     bc.append(loc_ids)
-                    block_list[loc_ids.flatten()] = -1
+                    block_list[loc_ids] = -1
                 else:
                     bc.append(np.array(i))
                     block_list[i] = -1
@@ -107,10 +107,10 @@
         pbar = get_pbar("Constructing meshes", num_meshes)
         for i in range(num_meshes):
             if bc[i].size > 1:
-                ob = bc[i][0,0,0]
-                xb = bc[i][1,0,0]
-                yb = bc[i][0,1,0]
-                zb = bc[i][0,0,1]
+                ob = bc[i][0]
+                xb = bc[i][1]
+                yb = bc[i][2]
+                zb = bc[i][4]
                 x = np.concatenate([x1f[ob,:-1], x1f[xb,:]])
                 y = np.concatenate([x2f[ob,:-1], x2f[yb,:]])
                 z = np.concatenate([x3f[ob,:-1], x3f[zb,:]])

diff -r dcf5e9d3be605b05a60c2867b7505f93d8fad7c5 -r e2158d70fa00cf6508306a283e7032448385a8dd yt/frontends/athena_pp/io.py
--- a/yt/frontends/athena_pp/io.py
+++ b/yt/frontends/athena_pp/io.py
@@ -27,6 +27,10 @@
         seq = list(v[1] for v in g)
         yield seq
 
+ii = [0, 1, 0, 1, 0, 1, 0, 1]
+jj = [0, 0, 1, 1, 0, 0, 1, 1]
+kk = [0, 0, 0, 0, 1, 1, 1, 1]
+
 class IOHandlerAthenaPP(BaseIOHandler):
     _particle_reader = False
     _dataset_type = "athena_pp"
@@ -41,7 +45,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype != "athena++" for ftype, fname in fields)):
+        if any((ftype != "athena_pp" for ftype, fname in fields)):
             raise NotImplementedError
         f = self._handle
         rv = {}
@@ -60,17 +64,14 @@
                 if self.ds.logarithmic:
                     for mesh in chunk.objs:
                         if mesh.mesh_blocks.size == 1:
-                            id = mesh.mesh_blocks[0]
-                            data = ds[fdi,id,:,:,:].transpose()
+                            data = ds[fdi,mesh.mesh_blocks,:,:,:].transpose()
                         else:
                             nx, ny, nz = mesh.mesh_dims//2
                             data = np.empty(mesh.mesh_dims, dtype="=f8")
-                            for i in range(2):
-                                for j in range(2):
-                                    for k in range(2):
-                                        id = mesh.mesh_blocks[i,j,k]
-                                        data[i*nx:(i+1)*nx,j*ny:(j+1)*ny,k*nz:(k+1)*nz] = \
-                                            ds[fdi,id,:,:,:].transpose()
+                            ids = mesh.mesh_blocks
+                            for n in range(8):
+                                data[ii[n]*nx:(ii[n]+1)*nx,jj[n]*ny:(jj[n]+1)*ny,kk[n]*nz:(kk[n]+1)*nz] = \
+                                     ds[fdi,ids[n],:,:,:].transpose()
                         ind += mesh.select(selector, data, rv[field], ind)  # caches
 
                 else:


https://bitbucket.org/yt_analysis/yt/commits/1ab69da8de16/
Changeset:   1ab69da8de16
Branch:      yt
User:        jzuhone
Date:        2016-05-19 14:23:02+00:00
Summary:     This is a more sensible choice of default plot origin if we have a spherical or cylindrical geometry.
Affected #:  1 file

diff -r e2158d70fa00cf6508306a283e7032448385a8dd -r 1ab69da8de16a749157070bd19d8edfc84b62a94 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1228,6 +1228,10 @@
         if field_parameters is None:
             field_parameters = {}
 
+        if ds.geometry == "spherical" or ds.geometry == "cylindrical":
+            mylog.info("Setting origin='domain' for %s geometry." % ds.geometry)
+            origin = 'domain'
+
         if isinstance(ds, YTSpatialPlotDataset):
             slc = ds.all_data()
             slc.axis = axis


https://bitbucket.org/yt_analysis/yt/commits/58bcb5571636/
Changeset:   58bcb5571636
Branch:      yt
User:        jzuhone
Date:        2016-05-20 19:14:53+00:00
Summary:     This should be "native" instead.
Affected #:  1 file
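
With this change a SlicePlot of a spherical or cylindrical dataset quietly defaults to
origin='native'. A rough usage sketch built on the fake dataset helper from yt.testing
(illustrative; behavior as assumed here):

    import yt
    from yt.testing import fake_amr_ds

    ds = fake_amr_ds(fields=("Density",), geometry="spherical")
    # origin is switched to 'native' internally for spherical/cylindrical
    # geometries, so no explicit origin keyword is needed
    slc = yt.SlicePlot(ds, "phi", "Density")
    slc.save()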

diff -r 1ab69da8de16a749157070bd19d8edfc84b62a94 -r 58bcb557163672b1b61d63f3f2a40a09c57b2985 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1229,8 +1229,8 @@
             field_parameters = {}
 
         if ds.geometry == "spherical" or ds.geometry == "cylindrical":
-            mylog.info("Setting origin='domain' for %s geometry." % ds.geometry)
-            origin = 'domain'
+            mylog.info("Setting origin='native' for %s geometry." % ds.geometry)
+            origin = 'native'
 
         if isinstance(ds, YTSpatialPlotDataset):
             slc = ds.all_data()


https://bitbucket.org/yt_analysis/yt/commits/950f59767aeb/
Changeset:   950f59767aeb
Branch:      yt
User:        jzuhone
Date:        2016-05-20 19:31:52+00:00
Summary:     Use the default _count_selection implementation here
Affected #:  1 file

diff -r 58bcb557163672b1b61d63f3f2a40a09c57b2985 -r 950f59767aebd63a77bd29272b7be57e745fa216 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -141,11 +141,6 @@
     def _detect_output_fields(self):
         self.field_list = [("athena_pp", k) for k in self.ds._field_map]
 
-    def _count_selection(self, dobj, meshes = None):
-        if meshes is None: meshes = dobj._chunk_info
-        count = np.sum([m.count(dobj.selector) for m in meshes])
-        return count
-
 class AthenaPPGrid(AMRGridPatch):
     _id_offset = 0
 


https://bitbucket.org/yt_analysis/yt/commits/0271a6317bc4/
Changeset:   0271a6317bc4
Branch:      yt
User:        jzuhone
Date:        2016-05-20 20:28:16+00:00
Summary:     Stopgap until the PR that fixes this is merged in
Affected #:  1 file

diff -r 950f59767aebd63a77bd29272b7be57e745fa216 -r 0271a6317bc44fe5ca2ecff231b7c66ee0781f69 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -22,15 +22,6 @@
     def __init__(self, filename):
         self.handle = h5py.File(filename, 'r')
 
-    def __del__(self):
-        if self.handle is not None:
-            # In h5py 2.4 and newer file handles are closed automatically.
-            # so only close the handle on old versions.  This works around an
-            # issue in h5py when run under python 3.4.
-            # See https://github.com/h5py/h5py/issues/534
-            if LooseVersion(h5py.__version__) < '2.4.0':
-                self.handle.close()
-
     def __getitem__(self, key):
         return self.handle[key]
 


https://bitbucket.org/yt_analysis/yt/commits/87b7b8ee6c51/
Changeset:   87b7b8ee6c51
Branch:      yt
User:        jzuhone
Date:        2016-05-21 02:33:08+00:00
Summary:     The RIGHT way to do this: index the block grid by refinement level as well
Affected #:  1 file
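
Blocks at different refinement levels can share the same logical (i, j, k) location, so
a purely 3-D lookup grid can silently overwrite entries; adding a level axis and filling
with -1 turns "all eight neighbors exist at this level" into an explicit test. A toy
sketch of the idea (tiny assumed sizes, not the frontend code):

    import numpy as np

    log_loc = np.array([[0, 0, 0], [1, 0, 0]])   # logical locations of two blocks...
    levels = np.array([0, 1])                    # ...at different refinement levels
    nbx, nby, nbz = tuple(np.max(log_loc, axis=0) + 1)
    nlevel = levels.max() + 1

    block_grid = -np.ones((nbx, nby, nbz, nlevel), dtype=np.int64)
    block_grid[log_loc[:, 0], log_loc[:, 1], log_loc[:, 2], levels] = np.arange(2)
    # a neighborhood is only merged when every entry is a real block id
    neigh = block_grid[0:2, 0:2, 0:2, 0]
    can_merge = np.all(neigh > -1)   # False here: only one block exists at level 0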

diff -r 0271a6317bc44fe5ca2ecff231b7c66ee0781f69 -r 87b7b8ee6c51488e5acf9dfbe2496b720b6264a9 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -80,21 +80,20 @@
         x1f = self._handle["x1f"]
         x2f = self._handle["x2f"]
         x3f = self._handle["x3f"]
-        nblock = tuple(np.max(log_loc, axis=0)+1)
+        nbx, nby, nbz = tuple(np.max(log_loc, axis=0)+1)
+        nlevel = self._handle.attrs["MaxLevel"]+1
 
-        block_grid = np.zeros(nblock, dtype=np.int)
-        level_grid = np.zeros(nblock, dtype=np.int)
-        block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2]] = np.arange(num_blocks)
-        level_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2]] = levels[:]
+        block_grid = -np.ones((nbx,nby,nbz,nlevel), dtype=np.int)
+        block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2],levels[:]] = np.arange(num_blocks)
 
         block_list = np.arange(num_blocks, dtype='int64')
         bc = []
         for i in range(num_blocks):
             if block_list[i] >= 0:
                 ii, jj, kk = log_loc[i]
-                loc_levels = level_grid[ii:ii+2,jj:jj+2,kk:kk+2]
-                if np.unique(loc_levels).size == 1:
-                    loc_ids = block_grid[ii:ii+2,jj:jj+2,kk:kk+2].transpose().flatten()
+                neigh = block_grid[ii:ii+2,jj:jj+2,kk:kk+2,levels[i]]
+                if np.all(neigh > -1):
+                    loc_ids = neigh.transpose().flatten()
                     bc.append(loc_ids)
                     block_list[loc_ids] = -1
                 else:


https://bitbucket.org/yt_analysis/yt/commits/e646fba435c7/
Changeset:   e646fba435c7
Branch:      yt
User:        jzuhone
Date:        2016-05-21 04:38:34+00:00
Summary:     Getting rid of logspherical.
Affected #:  4 files

diff -r 87b7b8ee6c51488e5acf9dfbe2496b720b6264a9 -r e646fba435c781d33400473f137cc85e062bdc24 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -69,7 +69,6 @@
     PolarCoordinateHandler, \
     CylindricalCoordinateHandler, \
     SphericalCoordinateHandler, \
-    LogSphericalCoordinateHandler, \
     GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
@@ -487,8 +486,6 @@
             cls = PolarCoordinateHandler
         elif self.geometry == "spherical":
             cls = SphericalCoordinateHandler
-        elif self.geometry == "logspherical":
-            cls = LogSphericalCoordinateHandler
         elif self.geometry == "geographic":
             cls = GeographicCoordinateHandler
         elif self.geometry == "spectral_cube":

diff -r 87b7b8ee6c51488e5acf9dfbe2496b720b6264a9 -r e646fba435c781d33400473f137cc85e062bdc24 yt/geometry/coordinates/api.py
--- a/yt/geometry/coordinates/api.py
+++ b/yt/geometry/coordinates/api.py
@@ -21,8 +21,7 @@
 from .cylindrical_coordinates import \
     CylindricalCoordinateHandler
 from .spherical_coordinates import \
-    SphericalCoordinateHandler, \
-    LogSphericalCoordinateHandler
+    SphericalCoordinateHandler
 from .geographic_coordinates import \
     GeographicCoordinateHandler
 from .spec_cube_coordinates import \

diff -r 87b7b8ee6c51488e5acf9dfbe2496b720b6264a9 -r e646fba435c781d33400473f137cc85e062bdc24 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -253,247 +253,3 @@
         print("Actual volume is                   %0.16e" % (v2))
         print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
 
-class LogSphericalCoordinateHandler(CoordinateHandler):
-
-    def __init__(self, ds, ordering = ('logr', 'theta', 'phi')):
-        super(LogSphericalCoordinateHandler, self).__init__(ds, ordering)
-        # Generate 
-        self.image_units = {}
-        self.image_units[self.axis_id['logr']] = ("rad", "rad")
-        self.image_units[self.axis_id['theta']] = (None, None)
-        self.image_units[self.axis_id['phi']] = (None, None)
-
-    def setup_fields(self, registry):
-        # return the fields for r, z, theta
-        registry.add_field(("index", "dx"), function=_unknown_coord)
-        registry.add_field(("index", "dy"), function=_unknown_coord)
-        registry.add_field(("index", "dz"), function=_unknown_coord)
-        registry.add_field(("index", "x"), function=_unknown_coord)
-        registry.add_field(("index", "y"), function=_unknown_coord)
-        registry.add_field(("index", "z"), function=_unknown_coord)
-        f1, f2 = _get_coord_fields(self.axis_id['logr'], "")
-        registry.add_field(("index", "dlogr"), function = f1,
-                           display_field = False,
-                           units = "")
-        registry.add_field(("index", "logr"), function = f2,
-                           display_field = False,
-                           units = "")
-
-        f1, f2 = _get_coord_fields(self.axis_id['theta'], "")
-        registry.add_field(("index", "dtheta"), function = f1,
-                           display_field = False,
-                           units = "")
-        registry.add_field(("index", "theta"), function = f2,
-                           display_field = False,
-                           units = "")
-
-        f1, f2 = _get_coord_fields(self.axis_id['phi'], "")
-        registry.add_field(("index", "dphi"), function = f1,
-                           display_field = False,
-                           units = "")
-        registry.add_field(("index", "phi"), function = f2,
-                           display_field = False,
-                           units = "")
-
-        def _logr_to_r(field, data):
-            return data.apply_units(10**data["logr"], "code_length")
-        registry.add_field(("index", "r"),
-                 function = _logr_to_r,
-                 units = "code_length")
-
-        def _dlogr_to_dr(field, data):
-            LE = 10**(data["logr"] - data["dlogr"]/2.0)
-            RE = 10**(data["logr"] + data["dlogr"]/2.0)
-            return data.apply_units(RE - LE, "code_length")
-        registry.add_field(("index", "dr"),
-                 function = _dlogr_to_dr,
-                 units = "code_length")
-
-        def _SphericalVolume(field, data):
-            # r**2 sin theta dr dtheta dphi
-            vol = data["index", "r"]**2.0
-            vol *= data["index", "dr"]
-            vol *= np.sin(data["index", "theta"])
-            vol *= data["index", "dtheta"]
-            vol *= data["index", "dphi"]
-            return vol
-        registry.add_field(("index", "cell_volume"),
-                 function=_SphericalVolume,
-                 units = "code_length**3")
-
-        def _path_r(field, data):
-            return data["index", "dr"]
-        registry.add_field(("index", "path_element_r"),
-                 function = _path_r,
-                 units = "code_length")
-        def _path_theta(field, data):
-            # Note: this already assumes cell-centered
-            return data["index", "r"] * data["index", "dtheta"]
-        registry.add_field(("index", "path_element_theta"),
-                 function = _path_theta,
-                 units = "code_length")
-        def _path_phi(field, data):
-            # Note: this already assumes cell-centered
-            return data["index", "r"] \
-                    * data["index", "dphi"] \
-                    * np.sin(data["index", "theta"])
-        registry.add_field(("index", "path_element_phi"),
-                 function = _path_phi,
-                 units = "code_length")
-
-    def pixelize(self, dimension, data_source, field, bounds, size,
-                 antialias = True, periodic = True):
-        self.period
-        name = self.axis_name[dimension]
-        if name == 'logr':
-            return self._ortho_pixelize(data_source, field, bounds, size,
-                                        antialias, dimension, periodic)
-        elif name in ('theta', 'phi'):
-            return self._cyl_pixelize(data_source, field, bounds, size,
-                                          antialias, dimension)
-        else:
-            raise NotImplementedError
-
-    def _ortho_pixelize(self, data_source, field, bounds, size, antialias,
-                        dim, periodic):
-        buff = pixelize_aitoff(data_source["py"], data_source["pdy"],
-                               data_source["px"], data_source["pdx"],
-                               size, data_source[field], None,
-                               None, theta_offset = 0,
-                               phi_offset = 0).transpose()
-        return buff
-
-
-    def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
-                      dimension):
-        if dimension == 1:
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
-        elif dimension == 2:
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
-            buff = buff.transpose()
-        else:
-            raise RuntimeError
-        return buff
-
-
-    def convert_from_cartesian(self, coord):
-        raise NotImplementedError
-
-    def convert_to_cartesian(self, coord):
-        if isinstance(coord, np.ndarray) and len(coord.shape) > 1:
-            ri = 10**self.axis_id['logr']
-            thetai = self.axis_id['theta']
-            phii = self.axis_id['phi']
-            r = coord[:,ri]
-            theta = coord[:,thetai]
-            phi = coord[:,phii]
-            nc = np.zeros_like(coord)
-            # r, theta, phi
-            nc[:,ri] = np.cos(phi) * np.sin(theta)*r
-            nc[:,thetai] = np.sin(phi) * np.sin(theta)*r
-            nc[:,phii] = np.cos(theta) * r
-        else:
-            r, theta, phi = coord
-            nc = (np.cos(phi) * np.sin(theta)*r,
-                  np.sin(phi) * np.sin(theta)*r,
-                  np.cos(theta) * r)
-        return nc
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_to_spherical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_spherical(self, coord):
-        raise NotImplementedError
-
-    _image_axis_name = None
-    @property
-    def image_axis_name(self):    
-        if self._image_axis_name is not None:
-            return self._image_axis_name
-        # This is the x and y axes labels that get displayed.  For
-        # non-Cartesian coordinates, we usually want to override these for
-        # Cartesian coordinates, since we transform them.
-        rv = {self.axis_id['logr']: ('theta', 'phi'),
-              self.axis_id['theta']: ('log_{10}(x) / \\sin(\\theta)', 'log_{10}(y) / \\sin(\\theta)'),
-              self.axis_id['phi']: ('log_{10}(R)', 'z')}
-        for i in list(rv.keys()):
-            rv[self.axis_name[i]] = rv[i]
-            rv[self.axis_name[i].capitalize()] = rv[i]
-        self._image_axis_name = rv
-        return rv
-
-    _x_pairs = (('logr', 'theta'), ('theta', 'logr'), ('phi', 'logr'))
-    _y_pairs = (('logr', 'phi'), ('theta', 'phi'), ('phi', 'theta'))
-
-    @property
-    def period(self):
-        return self.ds.domain_width
-
-    def sanitize_center(self, center, axis):
-        center, display_center = super(
-            LogSphericalCoordinateHandler, self).sanitize_center(center, axis)
-        name = self.axis_name[axis]
-        if name == 'logr':
-            display_center = center
-        elif name == 'theta':
-            display_center = (0.0 * display_center[0],
-                              0.0 * display_center[1],
-                              0.0 * display_center[2])
-        elif name == 'phi':
-            display_center = [self.ds.domain_width[0]/2.0 +
-                                self.ds.domain_left_edge[0],
-                              0.0 * display_center[1],
-                              0.0 * display_center[2]]
-            ri = self.axis_id['logr']
-            c = self.ds.domain_width[ri]/2.0 + self.ds.domain_left_edge[ri]
-            display_center[ri] = c
-            display_center = tuple(display_center)
-        return center, display_center
-
-    def sanitize_width(self, axis, width, depth):
-        name = self.axis_name[axis]
-        if width is not None:
-            width = super(LogSphericalCoordinateHandler, self).sanitize_width(
-              axis, width, depth)
-        elif name == 'logr':
-            width = [self.ds.domain_width[self.x_axis['logr']],
-                     self.ds.domain_width[self.y_axis['logr']]]
-        elif name == 'theta':
-            ri = self.axis_id['logr']
-            # Remember, in spherical coordinates when we cut in theta,
-            # we create a conic section
-            width = [2.0*self.ds.domain_width[ri],
-                     2.0*self.ds.domain_width[ri]]
-        elif name == 'phi':
-            ri = self.axis_id['logr']
-            width = [self.ds.domain_right_edge[ri],
-                     2.0*self.ds.domain_width[ri]]
-        return width
-
-    def _sanity_check(self):
-        """This prints out a handful of diagnostics that help verify the
-        dataset is well formed."""
-        # We just check a few things here.
-        dd = self.ds.all_data()
-        r0 = 10**self.ds.domain_left_edge[self.axis_id['logr']]
-        r1 = 10**self.ds.domain_right_edge[self.axis_id['logr']]
-        v1 = 4.0 * np.pi / 3.0 * (r1**3 - r0**3)
-        print("Total volume should be 4*pi*r**3 = %0.16e" % (v1))
-        v2 = dd.quantities.total_quantity("cell_volume")
-        print("Actual volume is                   %0.16e" % (v2))
-        print("Relative difference: %0.16e" % (np.abs(v2-v1)/(v2+v1)))
-

diff -r 87b7b8ee6c51488e5acf9dfbe2496b720b6264a9 -r e646fba435c781d33400473f137cc85e062bdc24 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -222,7 +222,6 @@
     # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
     'cartesian'    : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
     'spherical'    : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
-    'logspherical' : ( (-3.0, 0.0, 0.0), (5.0, np.pi, 2*np.pi) ),
     'cylindrical'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
     'polar'        : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
     'geographic'   : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt


https://bitbucket.org/yt_analysis/yt/commits/6ca0489acb7d/
Changeset:   6ca0489acb7d
Branch:      yt
User:        jzuhone
Date:        2016-05-21 04:40:11+00:00
Summary:     nits
Affected #:  1 file

diff -r e646fba435c781d33400473f137cc85e062bdc24 -r 6ca0489acb7dc27d708df4925e3def0a1257fb0d yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -220,11 +220,11 @@
 
 _geom_transforms = {
     # These are the bounds we want.  Cartesian we just assume goes 0 .. 1.
-    'cartesian'    : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
-    'spherical'    : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
-    'cylindrical'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
-    'polar'        : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
-    'geographic'   : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
+    'cartesian'  : ( (0.0, 0.0, 0.0), (1.0, 1.0, 1.0) ),
+    'spherical'  : ( (0.0, 0.0, 0.0), (1.0, np.pi, 2*np.pi) ),
+    'cylindrical': ( (0.0, 0.0, 0.0), (1.0, 1.0, 2.0*np.pi) ), # rzt
+    'polar'      : ( (0.0, 0.0, 0.0), (1.0, 2.0*np.pi, 1.0) ), # rtz
+    'geographic' : ( (-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0) ), # latlonalt
 }
 
 def fake_amr_ds(fields = ("Density",), geometry = "cartesian"):


https://bitbucket.org/yt_analysis/yt/commits/020ad50df00e/
Changeset:   020ad50df00e
Branch:      yt
User:        jzuhone
Date:        2016-05-23 20:51:06+00:00
Summary:     Magnetic fields in different geometries
Affected #:  1 file
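
The poloidal/toroidal decomposition below now depends on the dataset geometry: in
cylindrical coordinates the poloidal component is the in-plane projection
(r*B_r + z*B_z)/sqrt(r**2 + z**2) and the toroidal component is B_theta, while in
spherical coordinates they map to B_theta and B_phi respectively. A standalone NumPy
sketch of the cylindrical case (not the yt field function itself):

    import numpy as np

    def magnetic_field_poloidal_cyl(Br, Bz, r, z):
        # project (Br, Bz) onto the in-plane direction (r, z) / sqrt(r**2 + z**2)
        d = np.sqrt(r*r + z*z)
        return Br * (r/d) + Bz * (z/d)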

diff -r 6ca0489acb7dc27d708df4925e3def0a1257fb0d -r 020ad50df00e75c2898dffcbc249114a3e0b27ad yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -36,15 +36,17 @@
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
     unit_system = registry.ds.unit_system
 
-    if (ftype,"magnetic_field_x") not in registry:
+    axis_names = registry.ds.coordinates.axis_order
+
+    if (ftype,"magnetic_field_%s" % axis_names[0]) not in registry:
         return
 
-    u = registry[ftype,"magnetic_field_x"].units
+    u = registry[ftype,"magnetic_field_%s" % axis_names[0]].units
 
     def _magnetic_field_strength(field,data):
-        B2 = (data[ftype,"magnetic_field_x"]**2 +
-              data[ftype,"magnetic_field_y"]**2 +
-              data[ftype,"magnetic_field_z"]**2)
+        B2 = (data[ftype,"magnetic_field_%s" % axis_names[0]]**2 +
+              data[ftype,"magnetic_field_%s" % axis_names[1]]**2 +
+              data[ftype,"magnetic_field_%s" % axis_names[2]]**2)
         return np.sqrt(B2)
     registry.add_field((ftype,"magnetic_field_strength"),
                        function=_magnetic_field_strength,
@@ -69,36 +71,55 @@
              function=_magnetic_pressure,
              units=unit_system["pressure"])
 
-    def _magnetic_field_poloidal(field,data):
-        normal = data.get_field_parameter("normal")
-        d = data[ftype,'magnetic_field_x']
-        Bfields = data.ds.arr(
-                    [data[ftype,'magnetic_field_x'],
-                     data[ftype,'magnetic_field_y'],
-                     data[ftype,'magnetic_field_z']],
-                     d.units)
+    if registry.ds.geometry == "cartesian":
+        def _magnetic_field_poloidal(field,data):
+            normal = data.get_field_parameter("normal")
+            d = data[ftype,'magnetic_field_x']
+            Bfields = data.ds.arr(
+                        [data[ftype,'magnetic_field_x'],
+                         data[ftype,'magnetic_field_y'],
+                         data[ftype,'magnetic_field_z']],
+                         d.units)
 
-        theta = data["index", 'spherical_theta']
-        phi   = data["index", 'spherical_phi']
+            theta = data["index", 'spherical_theta']
+            phi   = data["index", 'spherical_phi']
 
-        return get_sph_theta_component(Bfields, theta, phi, normal)
+            return get_sph_theta_component(Bfields, theta, phi, normal)
+
+        def _magnetic_field_toroidal(field,data):
+            normal = data.get_field_parameter("normal")
+            d = data[ftype,'magnetic_field_x']
+            Bfields = data.ds.arr(
+                        [data[ftype,'magnetic_field_x'],
+                         data[ftype,'magnetic_field_y'],
+                         data[ftype,'magnetic_field_z']],
+                         d.units)
+
+            phi = data["index", 'spherical_phi']
+            return get_sph_phi_component(Bfields, phi, normal)
+
+    elif registry.ds.geometry == "cylindrical":
+        def _magnetic_field_poloidal(field, data):
+            r = data["index", "r"]
+            z = data["index", "z"]
+            d = np.sqrt(r*r+z*z)
+            return (data[ftype, "magnetic_field_r"]*(r/d) +
+                    data[ftype, "magnetic_field_z"]*(z/d))
+
+        def _magnetic_field_toroidal(field, data):
+            return data[ftype,"magnetic_field_theta"]
+
+    elif registry.ds.geometry == "spherical":
+        def _magnetic_field_poloidal(field, data):
+            return data[ftype,"magnetic_field_theta"]
+
+        def _magnetic_field_toroidal(field, data):
+            return data[ftype,"magnetic_field_phi"]
 
     registry.add_field((ftype, "magnetic_field_poloidal"),
              function=_magnetic_field_poloidal,
              units=u, validators=[ValidateParameter("normal")])
 
-    def _magnetic_field_toroidal(field,data):
-        normal = data.get_field_parameter("normal")
-        d = data[ftype,'magnetic_field_x']
-        Bfields = data.ds.arr(
-                    [data[ftype,'magnetic_field_x'],
-                     data[ftype,'magnetic_field_y'],
-                     data[ftype,'magnetic_field_z']],
-                     d.units)
-        
-        phi = data["index", 'spherical_phi']
-        return get_sph_phi_component(Bfields, phi, normal)
-
     registry.add_field((ftype, "magnetic_field_toroidal"),
              function=_magnetic_field_toroidal,
              units=u, validators=[ValidateParameter("normal")])
@@ -160,7 +181,7 @@
         def _mag_field(field, data):
             return convert(data[fd])
         return _mag_field
-    for ax, fd in zip("xyz", ds_fields):
+    for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields):
         registry.add_field((ftype,"magnetic_field_%s" % ax),
                            function=mag_field(fd),
                            units=unit_system[to_units.dimensions])
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/78591a7f0d8b/
Changeset:   78591a7f0d8b
Branch:      yt
User:        jzuhone
Date:        2016-05-23 20:59:00+00:00
Summary:     Merge
Affected #:  21 files

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1319,6 +1319,31 @@
 ``bbox``
        The bounding box for the particle positions.
 
+.. _loading-gizmo-data:
+
+Gizmo Data
+----------
+
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
+and then uses smoothing kernels to deposit those fields to an underlying 
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+To load Gizmo datasets using the standard HDF5 output format:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("snapshot_600.hdf5")
+
+Because the Gizmo output format is similar to the Gadget format, yt
+may load Gizmo datasets as Gadget depending on the circumstances, but this
+should not pose a problem in most situations.  FIRE outputs will be identified
+as Gizmo data based on the number of metallicity fields they contain (11 or 17).
+
+For Gizmo outputs written as raw binary outputs, you may have to specify
+a bounding box, field specification, and units as are done for standard 
+Gadget outputs.  See :ref:`loading-gadget-data` for more information.
+
 .. _loading-pyne-data:
 
 Halo Catalog Data
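
As a companion to the documentation added above: loading a raw binary Gizmo output
presumably mirrors the binary Gadget path, with a bounding box and unit definitions
supplied by hand. The snapshot name, bounding box, and unit values below are made up
for illustration:

    import yt

    bbox = [[-1000.0, 1000.0], [-1000.0, 1000.0], [-1000.0, 1000.0]]
    unit_base = {"UnitLength_in_cm": 3.085678e21,    # 1 kpc
                 "UnitMass_in_g": 1.989e43,          # 1e10 Msun
                 "UnitVelocity_in_cm_per_s": 1.0e5}  # 1 km/s
    ds = yt.load("snapshot_binary_600", bounding_box=bbox, unit_base=unit_base)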

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -28,16 +28,18 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| FITS                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | FLASH                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| FITS                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
-+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| Gizmo                 |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Maestro               |   Y [#f1]_ |     N     |      Y     |   Y   |    Y     |    Y     |     N      | Partial  |

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -406,16 +406,14 @@
    import yt
    ds = yt.load('MOOSE_sample_data/out.e-s010')
    sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
-   sl.annotate_mesh_lines(thresh=0.1)
+   sl.annotate_mesh_lines(plot_args={'color':'black'})
    sl.zoom(0.75)
    sl.save()
 
-This annotation is performed by marking the pixels where the mapped coordinate is close
-to the element boundary. What counts as 'close' (in the mapped coordinate system) is
-determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
-thinner.
+The ``plot_args`` parameter is a dictionary of keyword arguments that will be passed
+to matplotlib. It can be used to control the mesh line color, thickness, etc.
 
-The above example all involve 8-node hexahedral mesh elements. Here is another example from
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
 a dataset that uses 6-node wedge elements:
 
 .. python-script::

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c setup.py
--- a/setup.py
+++ b/setup.py
@@ -200,7 +200,6 @@
               ["yt/utilities/lib/pixelization_routines.pyx",
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
-              language="c++",
               libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -26,6 +26,9 @@
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
+  local_gizmo_000:
+    - yt/frontends/gizmo/tests/test_outputs.py
+
   local_halos_000:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -32,6 +32,7 @@
     'gadget_fof',
     'gamer',
     'gdf',
+    'gizmo',
     'halo_catalog',
     'http_stream',
     'moab',

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/gizmo/api.py
--- /dev/null
+++ b/yt/frontends/gizmo/api.py
@@ -0,0 +1,21 @@
+"""
+API for Gizmo frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    GizmoDataset
+
+from .fields import \
+    GizmoFieldInfo

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/gizmo/data_structures.py
--- /dev/null
+++ b/yt/frontends/gizmo/data_structures.py
@@ -0,0 +1,44 @@
+"""
+Data structures for Gizmo frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.on_demand_imports import _h5py as h5py
+
+from yt.frontends.gadget.data_structures import \
+    GadgetHDF5Dataset
+
+from .fields import \
+    GizmoFieldInfo
+
+class GizmoDataset(GadgetHDF5Dataset):
+    _field_info_class = GizmoFieldInfo
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        need_groups = ['Header']
+        veto_groups = ['FOF', 'Group', 'Subhalo']
+        valid = True
+        try:
+            fh = h5py.File(args[0], mode='r')
+            valid = all(ng in fh["/"] for ng in need_groups) and \
+              not any(vg in fh["/"] for vg in veto_groups)
+            dmetal = "/PartType0/Metallicity"
+            if dmetal not in fh or fh[dmetal].shape[1] not in (11, 17):
+                valid = False
+            fh.close()
+        except:
+            valid = False
+            pass
+        return valid

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/gizmo/fields.py
--- /dev/null
+++ b/yt/frontends/gizmo/fields.py
@@ -0,0 +1,116 @@
+"""
+Gizmo-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
+from yt.fields.species_fields import \
+    add_species_field_by_density
+from yt.frontends.gadget.fields import \
+    GadgetFieldInfo
+from yt.frontends.sph.fields import \
+    SPHFieldInfo
+
+class GizmoFieldInfo(GadgetFieldInfo):
+    known_particle_fields = (
+        ("Mass", ("code_mass", ["particle_mass"], None)),
+        ("Masses", ("code_mass", ["particle_mass"], None)),
+        ("Coordinates", ("code_length", ["particle_position"], None)),
+        ("Velocity", ("code_velocity", ["particle_velocity"], None)),
+        ("Velocities", ("code_velocity", ["particle_velocity"], None)),
+        ("ParticleIDs", ("", ["particle_index"], None)),
+        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
+        ("Density", ("code_mass / code_length**3", ["density"], None)),
+        ("MaximumTemperature", ("K", [], None)),
+        ("Temperature", ("K", ["temperature"], None)),
+        ("Epsilon", ("code_length", [], None)),
+        ("Metals", ("code_metallicity", ["metallicity"], None)),
+        ("Metallicity", ("code_metallicity", ["metallicity"], None)),
+        ("Phi", ("code_length", [], None)),
+        ("StarFormationRate", ("Msun / yr", [], None)),
+        ("FormationTime", ("code_time", ["creation_time"], None)),
+        ("Metallicity_00", ("", ["metallicity"], None)),
+        ("Metallicity_01", ("", ["He_metallicity"], None)),
+        ("Metallicity_02", ("", ["C_metallicity"], None)),
+        ("Metallicity_03", ("", ["N_metallicity"], None)),
+        ("Metallicity_04", ("", ["O_metallicity"], None)),
+        ("Metallicity_05", ("", ["Ne_metallicity"], None)),
+        ("Metallicity_06", ("", ["Mg_metallicity"], None)),
+        ("Metallicity_07", ("", ["Si_metallicity"], None)),
+        ("Metallicity_08", ("", ["S_metallicity"], None)),
+        ("Metallicity_09", ("", ["Ca_metallicity"], None)),
+        ("Metallicity_10", ("", ["Fe_metallicity"], None)),
+    )
+
+    def __init__(self, *args, **kwargs):
+        super(SPHFieldInfo, self).__init__(*args, **kwargs)
+        if ("PartType0", "Metallicity_00") in self.field_list:
+            self.nuclei_names = ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
+                                 "Ca", "Fe"]
+
+    def setup_gas_particle_fields(self, ptype):
+        super(GizmoFieldInfo, self).setup_gas_particle_fields(ptype)
+        self.alias((ptype, "temperature"), (ptype, "Temperature"))
+
+        def _h_density(field, data):
+            x_H = 1.0 - data[(ptype, "He_metallicity")] - \
+              data[(ptype, "metallicity")]
+            return x_H * data[(ptype, "density")] * \
+              data[(ptype, "NeutralHydrogenAbundance")]
+
+        self.add_field(
+            (ptype, "H_density"),
+            function=_h_density,
+            particle_type=True,
+            units=self.ds.unit_system["density"])
+        add_species_field_by_density(self, ptype, "H", particle_type=True)
+        for suffix in ["density", "fraction", "mass", "number_density"]:
+            self.alias((ptype, "H_p0_%s" % suffix), (ptype, "H_%s" % suffix))
+
+        def _h_p1_density(field, data):
+            x_H = 1.0 - data[(ptype, "He_metallicity")] - \
+              data[(ptype, "metallicity")]
+            return x_H * data[(ptype, "density")] * \
+              (1.0 - data[(ptype, "NeutralHydrogenAbundance")])
+
+        self.add_field(
+            (ptype, "H_p1_density"),
+            function=_h_p1_density,
+            particle_type=True,
+            units=self.ds.unit_system["density"])
+        add_species_field_by_density(self, ptype, "H_p1", particle_type=True)
+
+        def _nuclei_mass_density_field(field, data):
+            species = field.name[1][:field.name[1].find("_")]
+            return data[ptype, "density"] * \
+              data[ptype, "%s_metallicity" % species]
+
+        num_neighbors = 64
+        for species in self.nuclei_names:
+            self.add_field(
+                (ptype, "%s_nuclei_mass_density" % species),
+                function=_nuclei_mass_density_field,
+                particle_type=True,
+                units=self.ds.unit_system["density"])
+
+            for suf in ["_nuclei_mass_density", "_metallicity"]:
+                field = "%s%s" % (species, suf)
+                fn = add_volume_weighted_smoothed_field(
+                    ptype, "particle_position", "particle_mass",
+                    "smoothing_length", "density", field,
+                    self, num_neighbors)
+
+                self.alias(("gas", field), fn[0])

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/gizmo/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/gizmo/tests/test_outputs.py
@@ -0,0 +1,46 @@
+"""
+Gizmo frontend tests
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+from yt.utilities.answer_testing.framework import \
+    data_dir_load, \
+    requires_ds, \
+    sph_answer
+from yt.frontends.gizmo.api import GizmoDataset
+
+FIRE_m12i = 'FIRE_M12i_ref11/snapshot_600.hdf5'
+
+# This maps from field names to weight field names to use for projections
+fields = OrderedDict(
+    [
+        (("gas", "density"), None),
+        (("gas", "temperature"), ('gas', 'density')),
+        (("gas", "metallicity"), ('gas', 'density')),
+        (("gas", "O_metallicity"), ('gas', 'density')),
+        (('gas', 'velocity_magnitude'), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+    ]
+)
+
+@requires_ds(FIRE_m12i)
+def test_GizmoDataset():
+    ds = data_dir_load(FIRE_m12i)
+    assert isinstance(ds, GizmoDataset)
+    for test in sph_answer(ds, 'snapshot_600', 4786950, fields):
+        test_GizmoDataset.__name__ = test.description
+        yield test

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -41,27 +41,9 @@
         ("Phi", ("code_length", [], None)),
         ("StarFormationRate", ("Msun / yr", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
-        # These are metallicity fields that get discovered for FIRE simulations
         ("Metallicity_00", ("", ["metallicity"], None)),
-        ("Metallicity_01", ("", ["He_fraction"], None)),
-        ("Metallicity_02", ("", ["C_fraction"], None)),
-        ("Metallicity_03", ("", ["N_fraction"], None)),
-        ("Metallicity_04", ("", ["O_fraction"], None)),
-        ("Metallicity_05", ("", ["Ne_fraction"], None)),
-        ("Metallicity_06", ("", ["Mg_fraction"], None)),
-        ("Metallicity_07", ("", ["Si_fraction"], None)),
-        ("Metallicity_08", ("", ["S_fraction"], None)),
-        ("Metallicity_09", ("", ["Ca_fraction"], None)),
-        ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
-    def __init__(self, *args, **kwargs):
-        super(SPHFieldInfo, self).__init__(*args, **kwargs)
-        # Special case for FIRE
-        if ("PartType0", "Metallicity_00") in self.field_list:
-            self.species_names += ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
-                "Ca", "Fe"]
-
     def setup_particle_fields(self, ptype, *args, **kwargs):
         super(SPHFieldInfo, self).setup_particle_fields(ptype, *args, **kwargs)
         setup_species_fields(self, ptype)

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -76,10 +76,15 @@
             field_data = ad[field]
             buff_size = size[0:dimension] + (1,) + size[dimension:]
 
+            ax = data_source.axis
+            xax = self.x_axis[ax]
+            yax = self.y_axis[ax]
             c = np.float64(data_source.center[dimension].d)
-            bounds.insert(2*dimension, c)
-            bounds.insert(2*dimension, c)
-            bounds = np.reshape(bounds, (3, 2))
+
+            extents = np.zeros((3, 2))
+            extents[ax] = np.array([c, c])
+            extents[xax] = bounds[0:2]
+            extents[yax] = bounds[2:4]
 
             # if this is an element field, promote to 2D here
             if len(field_data.shape) == 1:
@@ -101,13 +106,10 @@
 
             img = pixelize_element_mesh(coords,
                                         indices,
-                                        buff_size, field_data, bounds,
+                                        buff_size, field_data, extents,
                                         index_offset=offset)
 
             # re-order the array and squeeze out the dummy dim
-            ax = data_source.axis
-            xax = self.x_axis[ax]
-            yax = self.y_axis[ax]
             return np.squeeze(np.transpose(img, (yax, xax, ax)))
 
         elif dimension < 3:
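
The refactored bounds handling above builds a (3, 2) extents array in which the slice axis collapses to the slice center while the two image axes keep the 2D plot bounds. A small NumPy sketch of that construction (axis indices and bounds are made-up values):

    import numpy as np

    ax, xax, yax = 2, 0, 1            # slicing along z; image axes are x and y
    c = 0.5                           # center of the slice along the z axis
    bounds = [0.0, 1.0, 0.0, 1.0]     # [x0, x1, y0, y1] in the image plane

    extents = np.zeros((3, 2))
    extents[ax] = np.array([c, c])    # zero thickness along the slice axis
    extents[xax] = bounds[0:2]
    extents[yax] = bounds[2:4]
    # extents is now [[0., 1.], [0., 1.], [0.5, 0.5]]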

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -30,11 +30,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
     cdef int check_mesh_lines(self, double* mapped_coord) nogil
 
 
@@ -52,11 +47,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
 
 cdef class P1Sampler3D(ElementSampler):
 
@@ -72,11 +62,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
     cdef int check_mesh_lines(self, double* mapped_coord) nogil
 
 
@@ -169,11 +154,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
     cdef int check_mesh_lines(self, double* mapped_coord) nogil
 
 
@@ -191,11 +171,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
     cdef int check_mesh_lines(self, double* mapped_coord) nogil
 
 
@@ -214,11 +189,6 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil
-
     cdef int check_mesh_lines(self, double* mapped_coord) nogil
 
 
@@ -250,8 +220,3 @@
                                      double* vals) nogil
 
     cdef int check_inside(self, double* mapped_coord) nogil
-
-    cdef int check_near_edge(self,
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -86,15 +86,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-        pass
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef int check_mesh_lines(self, double* mapped_coord) nogil:
         pass
 
@@ -182,19 +173,6 @@
                 return 0
         return 1
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-
-        if (fabs(mapped_coord[direction]) < tolerance or
-            fabs(mapped_coord[direction] - 1.0) < tolerance):
-            return 1
-        return 0
-
 
 cdef class P1Sampler3D(ElementSampler):
     '''
@@ -261,19 +239,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-
-        if (fabs(mapped_coord[direction]) < tolerance or
-            fabs(mapped_coord[direction] - 1.0) < tolerance):
-            return 1
-        return 0
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef int check_mesh_lines(self, double* mapped_coord) nogil:
         cdef double u, v, w
         cdef double thresh = 2.0e-2
@@ -413,18 +378,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-        if (fabs(fabs(mapped_coord[direction]) - 1.0) < tolerance):
-            return 1
-        else:
-            return 0
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef int check_mesh_lines(self, double* mapped_coord) nogil:
         if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
             fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
@@ -568,19 +521,6 @@
             return 0
         return 1
 
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-        if (fabs(fabs(mapped_coord[direction]) - 1.0) < tolerance):
-            return 1
-        else:
-            return 0
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -781,22 +721,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-        if (direction == 2 and (fabs(fabs(mapped_coord[direction]) - 1.0) < tolerance)):
-            return 1
-        elif (direction == 1 and (fabs(mapped_coord[direction]) < tolerance)):
-            return 1
-        elif (direction == 0 and (fabs(mapped_coord[0]) < tolerance or
-                                  fabs(mapped_coord[0] + mapped_coord[1] - 1.0) < tolerance)):
-            return 1
-        return 0
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef int check_mesh_lines(self, double* mapped_coord) nogil:
         cdef double r, s, t
         cdef double thresh = 5.0e-2
@@ -969,20 +893,6 @@
             return 0
         return 1
 
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef int check_near_edge(self, 
-                             double* mapped_coord,
-                             double tolerance,
-                             int direction) nogil:
-        if (fabs(fabs(mapped_coord[direction]) - 1.0) < tolerance):
-            return 1
-        else:
-            return 0
-
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -18,8 +18,9 @@
 cimport cython
 from libc.stdlib cimport malloc, free
 from yt.utilities.lib.fp_utils cimport fclip, i64clip
-from libc.math cimport copysign
+from libc.math cimport copysign, fabs
 from yt.utilities.exceptions import YTDomainOverflow
+from yt.utilities.lib.vec3_ops cimport subtract, cross, dot, L2_norm
 
 cdef extern from "math.h":
     double exp(double x) nogil
@@ -501,6 +502,11 @@
     cdef np.float64_t p0[3]
     cdef np.float64_t p1[3]
     cdef np.float64_t p3[3]
+    cdef np.float64_t E0[3]
+    cdef np.float64_t E1[3]
+    cdef np.float64_t tri_norm[3]
+    cdef np.float64_t plane_norm[3]
+    cdef np.float64_t dp
     cdef int i, j, k, count, ntri, nlines
     nlines = 0
     ntri = triangles.shape[0]
@@ -510,6 +516,22 @@
     first = last = points = NULL
     for i in range(ntri):
         count = 0
+
+        # skip if triangle is close to being parallel to plane
+        for j in range(3):
+            p0[j] = triangles[i, 0, j]
+            p1[j] = triangles[i, 1, j]
+            p3[j] = triangles[i, 2, j]            
+            plane_norm[j] = 0.0
+        plane_norm[ax] = 1.0
+        subtract(p1, p0, E0)
+        subtract(p3, p0, E1)
+        cross(E0, E1, tri_norm)
+        dp = dot(tri_norm, plane_norm)
+        dp /= L2_norm(tri_norm)
+        if (fabs(dp) > 0.995):
+            continue
+        
         # Now for each line segment (01, 12, 20) we check to see how many cross
         # the coordinate.
         for j in range(3):
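
The early exit added above skips triangles that are nearly parallel to the slicing plane by comparing the triangle normal against the plane normal. A NumPy sketch of the same test, with triangle vertices invented for illustration:

    import numpy as np

    # a triangle lying in the z = 0 plane
    p0 = np.array([0., 0., 0.])
    p1 = np.array([1., 0., 0.])
    p3 = np.array([0., 1., 0.])
    ax = 2                             # slicing plane is perpendicular to z

    plane_norm = np.zeros(3)
    plane_norm[ax] = 1.0

    tri_norm = np.cross(p1 - p0, p3 - p0)
    dp = np.dot(tri_norm, plane_norm) / np.linalg.norm(tri_norm)

    # |dp| == 1 here, so this triangle would be skipped
    skip = abs(dp) > 0.995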

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -56,6 +56,38 @@
     int hex20_faces[6][8]
 
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def triangulate_indices(np.ndarray[np.int64_t, ndim=2] indices):
+    cdef np.int64_t num_elem = indices.shape[0]
+    cdef np.int64_t num_verts = indices.shape[1]
+    cdef int[MAX_NUM_TRI][3] tri_array
+    cdef np.int64_t tri_per_elem
+
+    if (num_verts == 8 or num_verts == 20 or num_verts == 27):
+        tri_per_elem = HEX_NT
+        tri_array = triangulate_hex
+    elif num_verts == 4:
+        tri_per_elem = TETRA_NT
+        tri_array = triangulate_tetra
+    elif num_verts == 6:
+        tri_per_elem = WEDGE_NT
+        tri_array = triangulate_wedge
+    else:
+        raise YTElementTypeNotRecognized(3, num_verts)
+
+    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
+    tri_indices = np.empty((tri_per_elem * num_elem, 3), dtype="int64")
+
+    cdef np.int64_t i, j
+    for i in range(num_elem):
+        for j in range(tri_per_elem):
+            for k in range(3):
+                tri_indices[i*tri_per_elem + j][k] = indices[i][tri_array[j][k]]
+    return tri_indices
+
+
 cdef class LinearElementMesh:
     r'''
 

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -18,7 +18,8 @@
 cimport cython
 cimport libc.math as math
 from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, imax, fabs
-from yt.utilities.exceptions import YTPixelizeError, \
+from yt.utilities.exceptions import \
+    YTPixelizeError, \
     YTElementTypeNotRecognized
 from libc.stdlib cimport malloc, free
 from vec3_ops cimport dot, cross, subtract
@@ -544,8 +545,7 @@
                           buff_size,
                           np.ndarray[np.float64_t, ndim=2] field,
                           extents, 
-                          int index_offset = 0,
-                          double thresh = -1.0):
+                          int index_offset = 0):
     cdef np.ndarray[np.float64_t, ndim=3] img
     img = np.zeros(buff_size, dtype="float64")
     # Two steps:
@@ -592,27 +592,10 @@
 
     # if we are in 2D land, the 1 cell thick dimension had better be 'z'
     if ndim == 2:
-        assert(buff_size[2] == 1)
+        if buff_size[2] != 1:
+            raise RuntimeError("Slices of 2D datasets must be "
+                               "perpendicular to the 'z' direction.")
     
-    ax = -1
-    for i in range(3):
-        if buff_size[i] == 1:
-            ax = i
-    if ax == -1:
-        raise RuntimeError
-    xax = yax = -1
-    if ax == 0:
-        xax = 1
-        yax = 2
-    elif ax == 1:
-        xax = 1
-        yax = 2
-    elif ax == 2:
-        xax = 0
-        yax = 1
-    if xax == -1 or yax == -1:
-        raise RuntimeError
-
     # allocate temporary storage
     vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
     field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
@@ -673,20 +656,11 @@
                     sampler.map_real_to_unit(mapped_coord, vertices, ppoint)
                     if not sampler.check_inside(mapped_coord):
                         continue
-                    # if thresh is negative, we do the normal sample operation
-                    if thresh < 0.0:
-                        if (num_field_vals == 1):
-                            img[pi, pj, pk] = field_vals[0]
-                        else:
-                            img[pi, pj, pk] = sampler.sample_at_unit_point(mapped_coord,
-                                                                           field_vals)
+                    if (num_field_vals == 1):
+                        img[pi, pj, pk] = field_vals[0]
                     else:
-                        # otherwise, we draw the element boundaries
-                        if sampler.check_near_edge(mapped_coord, thresh, xax):
-                            img[pi, pj, pk] = 1.0
-                        elif sampler.check_near_edge(mapped_coord, thresh, yax):
-                            img[pi, pj, pk] = 1.0
-
+                        img[pi, pj, pk] = sampler.sample_at_unit_point(mapped_coord,
+                                                                       field_vals)
     free(vertices)
     free(field_vals)
     return img

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -22,8 +22,6 @@
 
 from distutils.version import LooseVersion
 
-from yt.config import \
-    ytcfg
 from yt.funcs import \
     mylog, iterable
 from yt.extern.six import add_metaclass
@@ -31,13 +29,14 @@
 from yt.visualization.image_writer import apply_colormap
 from yt.utilities.lib.geometry_utils import triangle_plane_intersect
 from yt.utilities.lib.pixelization_routines import \
-    pixelize_element_mesh, pixelize_off_axis_cartesian, \
+    pixelize_off_axis_cartesian, \
     pixelize_cartesian
 from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
     periodic_ray
 from yt.utilities.lib.line_integral_convolution import \
     line_integral_convolution_2d
-
+from yt.geometry.unstructured_mesh_handler import UnstructuredIndex
+from yt.utilities.lib.mesh_construction import triangulate_indices
 
 from . import _MPL
 
@@ -1605,85 +1604,74 @@
 
 class MeshLinesCallback(PlotCallback):
     """
-    annotate_mesh_lines(thresh=0.01)
+    annotate_mesh_lines()
 
-    Adds the mesh lines to the plot.
-
-    This is done by marking the pixels where the mapped coordinate
-    is within some threshold distance of one of the element boundaries.
-    If the mesh lines are too thick or too thin, try varying thresh.
+    Adds mesh lines to the plot. Only works for unstructured or 
+    semi-structured mesh data. For structured grid data, see
+    GridBoundaryCallback or CellEdgesCallback.
 
     Parameters
     ----------
 
-    thresh : float
-        The threshold distance, in mapped coordinates, within which the
-        pixels will be marked as part of the element boundary.
-        Default is 0.01.
+    plot_args:   dict, optional
+        A dictionary of arguments that will be passed to matplotlib.
+
+    Example
+    -------
+
+    >>> import yt
+    >>> ds = yt.load("MOOSE_sample_data/out.e-s010")
+    >>> sl = yt.SlicePlot(ds, 'z', ('connect2', 'convected'))
+    >>> sl.annotate_mesh_lines(plot_args={'color':'black'})
 
     """
     _type_name = "mesh_lines"
 
-    def __init__(self, thresh=0.1, cmap=None):
+    def __init__(self, plot_args=None):
         super(MeshLinesCallback, self).__init__()
-        self.thresh = thresh
-        if cmap is None:
-            cmap = ytcfg.get("yt", "default_colormap")
-        self.cmap = cmap
+        self.plot_args = plot_args
+
+    def promote_2d_to_3d(self, coords, indices, plot):
+        new_coords = np.zeros((2*coords.shape[0], 3))
+        new_connects = np.zeros((indices.shape[0], 2*indices.shape[1]), 
+                                dtype=np.int64)
+    
+        new_coords[0:coords.shape[0],0:2] = coords
+        new_coords[0:coords.shape[0],2] = plot.ds.domain_left_edge[2]
+        new_coords[coords.shape[0]:,0:2] = coords
+        new_coords[coords.shape[0]:,2] = plot.ds.domain_right_edge[2]
+        
+        new_connects[:,0:indices.shape[1]] = indices
+        new_connects[:,indices.shape[1]:] = indices + coords.shape[0]
+    
+        return new_coords, new_connects
 
     def __call__(self, plot):
 
+        index = plot.ds.index
+        if not issubclass(type(index), UnstructuredIndex):
+            raise RuntimeError("Mesh line annotations only work for "
+                               "unstructured or semi-structured mesh data.")
         ftype, fname = plot.field
-        mesh_id = int(ftype[-1]) - 1
-        mesh = plot.ds.index.meshes[mesh_id]
-
-        coords = mesh.connectivity_coords
-        indices = mesh.connectivity_indices
-
-        xx0, xx1 = plot._axes.get_xlim()
-        yy0, yy1 = plot._axes.get_ylim()
-
-        offset = mesh._index_offset
-        ax = plot.data.axis
-        xax = plot.data.ds.coordinates.x_axis[ax]
-        yax = plot.data.ds.coordinates.y_axis[ax]
-
-        size = plot.image._A.shape
-        c = plot.data.center[ax]
-        buff_size = size[0:ax] + (1,) + size[ax:]
+        for i, m in enumerate(index.meshes):
+            if ftype.startswith('connect') and int(ftype[-1]) - 1 != i:
+                continue
+            coords = m.connectivity_coords
+            indices = m.connectivity_indices - m._index_offset
+            
+            num_verts = indices.shape[1]
+            num_dims = coords.shape[1]
 
-        x0, x1 = plot.xlim
-        y0, y1 = plot.ylim
-        c = plot.data.center[ax]
-        bounds = [x0, x1, y0, y1]
-        bounds.insert(2*ax, c)
-        bounds.insert(2*ax, c)
-        bounds = np.reshape(bounds, (3, 2))
+            if num_dims == 2 and num_verts == 3:
+                coords, indices = self.promote_2d_to_3d(coords, indices, plot)
+            elif num_dims == 2 and num_verts == 4:
+                coords, indices = self.promote_2d_to_3d(coords, indices, plot)
 
-        # draw the mesh lines by marking where the mapped
-        # coordinate is close to the domain edge. We do this by
-        # calling the pixelizer with a dummy field and passing in
-        # a non-negative thresh.
-        dummy_field = np.zeros(indices.shape, dtype=np.float64)
-        img = pixelize_element_mesh(coords, indices,
-                                    buff_size,
-                                    dummy_field,
-                                    bounds,
-                                    offset, self.thresh)
-        img = np.squeeze(np.transpose(img, (yax, xax, ax)))
-
-        # convert to RGBA
-        size = plot.frb.buff_size
-        image = np.zeros((size[0], size[1], 4), dtype=np.uint8)
-        image[:, :, 0][img > 0.0] = 0
-        image[:, :, 1][img > 0.0] = 0
-        image[:, :, 2][img > 0.0] = 0
-        image[:, :, 3][img > 0.0] = 255
-
-        plot._axes.imshow(image, zorder=1,
-                          extent=[xx0, xx1, yy0, yy1],
-                          origin='lower', cmap=self.cmap,
-                          interpolation='nearest')
+            tri_indices = triangulate_indices(indices)
+            points = coords[tri_indices]
+        
+            tfc = TriangleFacetsCallback(points, plot_args=self.plot_args)
+            tfc(plot)
 
 
 class TriangleFacetsCallback(PlotCallback):
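
Since the triangulation step expects 3D connectivity, the promote_2d_to_3d helper above extrudes 2D elements through the domain in z by duplicating the coordinates at the lower and upper z edges. A small NumPy sketch of that extrusion for a single triangle (coordinates and domain edges invented):

    import numpy as np

    coords = np.array([[0., 0.], [1., 0.], [0., 1.]])   # one 2D triangle
    indices = np.array([[0, 1, 2]], dtype=np.int64)
    z_lo, z_hi = 0.0, 1.0                                # domain edges along z

    new_coords = np.zeros((2 * coords.shape[0], 3))
    new_coords[:coords.shape[0], 0:2] = coords
    new_coords[:coords.shape[0], 2] = z_lo
    new_coords[coords.shape[0]:, 0:2] = coords
    new_coords[coords.shape[0]:, 2] = z_hi

    new_connects = np.hstack([indices, indices + coords.shape[0]])
    # new_connects == [[0, 1, 2, 3, 4, 5]]: a 6-vertex wedge element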

diff -r 020ad50df00e75c2898dffcbc249114a3e0b27ad -r 78591a7f0d8bb552abecfafcfec08626c53fd04c yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -21,7 +21,9 @@
 from yt.config import \
     ytcfg
 from yt.testing import \
-    fake_amr_ds
+    fake_amr_ds, \
+    fake_tetrahedral_ds, \
+    fake_hexahedral_ds
 import yt.units as u
 from .test_plotwindow import assert_fname
 from yt.utilities.exceptions import \
@@ -368,6 +370,22 @@
                               color=(0.0, 1.0, 1.0))
         p.save(prefix)
 
+def test_mesh_lines_callback():
+    with _cleanup_fname() as prefix:
+
+        ds = fake_hexahedral_ds()
+        for field in ds.field_list:
+            sl = SlicePlot(ds, 1, field)
+            sl.annotate_mesh_lines(plot_args={'color':'black'})
+            yield assert_fname, sl.save(prefix)[0]
+
+        ds = fake_tetrahedral_ds()
+        for field in ds.field_list:
+            sl = SlicePlot(ds, 1, field)
+            sl.annotate_mesh_lines(plot_args={'color':'black'})
+            yield assert_fname, sl.save(prefix)[0]
+                
+
 def test_line_integral_convolution_callback():
     with _cleanup_fname() as prefix:
         ds = fake_amr_ds(fields =
@@ -390,3 +408,4 @@
                                              alpha=0.9, const_alpha=True)
         p.save(prefix)
 
+


https://bitbucket.org/yt_analysis/yt/commits/4300db642bd0/
Changeset:   4300db642bd0
Branch:      yt
User:        jzuhone
Date:        2016-05-23 21:44:20+00:00
Summary:     Merge
Affected #:  6 files

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,12 +157,6 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
-    Extension("yt.utilities.lib.primitives",
-              ["yt/utilities/lib/primitives.pyx"],
-              libraries=std_libs, 
-              depends=["yt/utilities/lib/primitives.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd",
-                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
@@ -170,6 +164,7 @@
               extra_link_args=omp_args,
               libraries=std_libs,
               depends=["yt/utilities/lib/element_mappings.pxd",
+                       "yt/utilities/lib/mesh_triangulation.h",
                        "yt/utilities/lib/vec3_ops.pxd",
                        "yt/utilities/lib/primitives.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
@@ -196,6 +191,9 @@
                        "yt/utilities/lib/fixed_interpolator.pxd",
                        "yt/utilities/lib/fixed_interpolator.h",
                        ]),
+    Extension("yt.utilities.lib.mesh_triangulation",
+              ["yt/utilities/lib/mesh_triangulation.pyx"],
+              depends=["yt/utilities/lib/mesh_triangulation.h"]),
     Extension("yt.utilities.lib.pixelization_routines",
               ["yt/utilities/lib/pixelization_routines.pyx",
                "yt/utilities/lib/pixelization_constants.c"],
@@ -203,6 +201,12 @@
               libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),
+    Extension("yt.utilities.lib.primitives",
+              ["yt/utilities/lib/primitives.pyx"],
+              libraries=std_libs, 
+              depends=["yt/utilities/lib/primitives.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],
@@ -282,6 +286,7 @@
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
                   depends=["yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/mesh_triangulation.h",
                            "yt/utilities/lib/mesh_intersection.pxd",
                            "yt/utlilites/lib/mesh_samplers.pxd",
                            "yt/utlilites/lib/mesh_traversal.pxd"]),

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -4,7 +4,7 @@
 from yt.utilities.lib.element_mappings cimport ElementSampler
 from yt.utilities.lib.primitives cimport BBox, Ray
 
-cdef extern from "mesh_construction.h":
+cdef extern from "mesh_triangulation.h":
     enum:
         MAX_NUM_TRI
 

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -40,7 +40,7 @@
     patchBoundsFunc
 from yt.utilities.exceptions import YTElementTypeNotRecognized
 
-cdef extern from "mesh_construction.h":
+cdef extern from "mesh_triangulation.h":
     enum:
         MAX_NUM_TRI
         
@@ -56,38 +56,6 @@
     int hex20_faces[6][8]
 
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def triangulate_indices(np.ndarray[np.int64_t, ndim=2] indices):
-    cdef np.int64_t num_elem = indices.shape[0]
-    cdef np.int64_t num_verts = indices.shape[1]
-    cdef int[MAX_NUM_TRI][3] tri_array
-    cdef np.int64_t tri_per_elem
-
-    if (num_verts == 8 or num_verts == 20 or num_verts == 27):
-        tri_per_elem = HEX_NT
-        tri_array = triangulate_hex
-    elif num_verts == 4:
-        tri_per_elem = TETRA_NT
-        tri_array = triangulate_tetra
-    elif num_verts == 6:
-        tri_per_elem = WEDGE_NT
-        tri_array = triangulate_wedge
-    else:
-        raise YTElementTypeNotRecognized(3, num_verts)
-
-    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
-    tri_indices = np.empty((tri_per_elem * num_elem, 3), dtype="int64")
-
-    cdef np.int64_t i, j
-    for i in range(num_elem):
-        for j in range(tri_per_elem):
-            for k in range(3):
-                tri_indices[i*tri_per_elem + j][k] = indices[i][tri_array[j][k]]
-    return tri_indices
-
-
 cdef class LinearElementMesh:
     r'''
 

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 yt/utilities/lib/mesh_triangulation.h
--- /dev/null
+++ b/yt/utilities/lib/mesh_triangulation.h
@@ -0,0 +1,66 @@
+#define MAX_NUM_TRI 12
+#define HEX_NV 8
+#define HEX_NT 12
+#define TETRA_NV 4
+#define TETRA_NT 4
+#define WEDGE_NV 6
+#define WEDGE_NT 8
+
+// This array is used to triangulate the hexahedral mesh elements
+// Each element has six faces with two triangles each.
+// The vertex ordering convention is assumed to follow that used
+// here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
+// Note that this is the case for Exodus II data.
+int triangulate_hex[MAX_NUM_TRI][3] = {
+  {0, 2, 1}, {0, 3, 2}, // Face is 3 2 1 0 
+  {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
+  {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
+  {1, 2, 6}, {1, 6, 5}, // Face is 1 2 6 5
+  {0, 7, 3}, {0, 4, 7}, // Face is 3 0 4 7
+  {3, 6, 2}, {3, 7, 6}  // Face is 2 3 7 6
+};
+
+// Similarly, this is used to triangulate the tetrahedral cells
+int triangulate_tetra[MAX_NUM_TRI][3] = {
+  {0, 1, 2}, 
+  {0, 1, 3},
+  {0, 2, 3},
+  {1, 2, 3},
+
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1}
+};
+
+// Triangulate wedges
+int triangulate_wedge[MAX_NUM_TRI][3] = {
+  {0, 1, 2},
+  {0, 3, 1},
+  {1, 3, 4},
+  {0, 2, 3},
+  {2, 5, 3},
+  {1, 4, 2},
+  {2, 4, 5},
+  {3, 5, 4},
+
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1}
+};
+
+
+// This is used to select faces from a 20-sided hex element
+int hex20_faces[6][8] = {
+  {0, 1, 5, 4, 12, 8,  13, 16}, 
+  {1, 2, 6, 5, 13, 9,  14, 17},
+  {3, 2, 6, 7, 15, 10, 14, 18},
+  {0, 3, 7, 4, 12, 11, 15, 19},
+  {4, 5, 6, 7, 19, 16, 17, 18},
+  {0, 1, 2, 3, 11, 8,  9,  10}
+};

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 yt/utilities/lib/mesh_triangulation.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_triangulation.pyx
@@ -0,0 +1,52 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from yt.utilities.exceptions import YTElementTypeNotRecognized
+
+cdef extern from "mesh_triangulation.h":
+    enum:
+        MAX_NUM_TRI
+        
+    int HEX_NV
+    int HEX_NT
+    int TETRA_NV
+    int TETRA_NT
+    int WEDGE_NV
+    int WEDGE_NT
+    int triangulate_hex[MAX_NUM_TRI][3]
+    int triangulate_tetra[MAX_NUM_TRI][3]
+    int triangulate_wedge[MAX_NUM_TRI][3]
+    int hex20_faces[6][8]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def triangulate_indices(np.ndarray[np.int64_t, ndim=2] indices):
+    cdef np.int64_t num_elem = indices.shape[0]
+    cdef np.int64_t num_verts = indices.shape[1]
+    cdef int[MAX_NUM_TRI][3] tri_array
+    cdef np.int64_t tri_per_elem
+
+    if (num_verts == 8 or num_verts == 20 or num_verts == 27):
+        tri_per_elem = HEX_NT
+        tri_array = triangulate_hex
+    elif num_verts == 4:
+        tri_per_elem = TETRA_NT
+        tri_array = triangulate_tetra
+    elif num_verts == 6:
+        tri_per_elem = WEDGE_NT
+        tri_array = triangulate_wedge
+    else:
+        raise YTElementTypeNotRecognized(3, num_verts)
+
+    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
+    tri_indices = np.empty((tri_per_elem * num_elem, 3), dtype="int64")
+
+    cdef np.int64_t i, j
+    for i in range(num_elem):
+        for j in range(tri_per_elem):
+            for k in range(3):
+                tri_indices[i*tri_per_elem + j][k] = indices[i][tri_array[j][k]]
+    return tri_indices

diff -r 78591a7f0d8bb552abecfafcfec08626c53fd04c -r 4300db642bd0174f9efc95e4a6908d58c117bba1 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -36,7 +36,7 @@
 from yt.utilities.lib.line_integral_convolution import \
     line_integral_convolution_2d
 from yt.geometry.unstructured_mesh_handler import UnstructuredIndex
-from yt.utilities.lib.mesh_construction import triangulate_indices
+from yt.utilities.lib.mesh_triangulation import triangulate_indices
 
 from . import _MPL
 


https://bitbucket.org/yt_analysis/yt/commits/baa18d26527d/
Changeset:   baa18d26527d
Branch:      yt
User:        jzuhone
Date:        2016-05-23 22:12:01+00:00
Summary:     Merge
Affected #:  2 files

diff -r 4300db642bd0174f9efc95e4a6908d58c117bba1 -r baa18d26527d02dc9078281c1c5bb42d47546a04 yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#define MAX_NUM_TRI 12
-#define HEX_NV 8
-#define HEX_NT 12
-#define TETRA_NV 4
-#define TETRA_NT 4
-#define WEDGE_NV 6
-#define WEDGE_NT 8
-
-// This array is used to triangulate the hexahedral mesh elements
-// Each element has six faces with two triangles each.
-// The vertex ordering convention is assumed to follow that used
-// here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
-// Note that this is the case for Exodus II data.
-int triangulate_hex[MAX_NUM_TRI][3] = {
-  {0, 2, 1}, {0, 3, 2}, // Face is 3 2 1 0 
-  {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
-  {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
-  {1, 2, 6}, {1, 6, 5}, // Face is 1 2 6 5
-  {0, 7, 3}, {0, 4, 7}, // Face is 3 0 4 7
-  {3, 6, 2}, {3, 7, 6}  // Face is 2 3 7 6
-};
-
-// Similarly, this is used to triangulate the tetrahedral cells
-int triangulate_tetra[MAX_NUM_TRI][3] = {
-  {0, 1, 2}, 
-  {0, 1, 3},
-  {0, 2, 3},
-  {1, 2, 3},
-
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1}
-};
-
-// Triangulate wedges
-int triangulate_wedge[MAX_NUM_TRI][3] = {
-  {0, 1, 2},
-  {0, 3, 1},
-  {1, 3, 4},
-  {0, 2, 3},
-  {2, 5, 3},
-  {1, 4, 2},
-  {2, 4, 5},
-  {3, 5, 4},
-
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1},
-  {-1, -1, -1}
-};
-
-
-// This is used to select faces from a 20-sided hex element
-int hex20_faces[6][8] = {
-  {0, 1, 5, 4, 12, 8,  13, 16}, 
-  {1, 2, 6, 5, 13, 9,  14, 17},
-  {3, 2, 6, 7, 15, 10, 14, 18},
-  {0, 3, 7, 4, 12, 11, 15, 19},
-  {4, 5, 6, 7, 19, 16, 17, 18},
-  {0, 1, 2, 3, 11, 8,  9,  10}
-};

diff -r 4300db642bd0174f9efc95e4a6908d58c117bba1 -r baa18d26527d02dc9078281c1c5bb42d47546a04 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1652,10 +1652,13 @@
         if not issubclass(type(index), UnstructuredIndex):
             raise RuntimeError("Mesh line annotations only work for "
                                "unstructured or semi-structured mesh data.")
-        ftype, fname = plot.field
         for i, m in enumerate(index.meshes):
-            if ftype.startswith('connect') and int(ftype[-1]) - 1 != i:
-                continue
+            try:
+                ftype, fname = plot.field
+                if ftype.startswith('connect') and int(ftype[-1]) - 1 != i:
+                    continue
+            except ValueError:
+                pass
             coords = m.connectivity_coords
             indices = m.connectivity_indices - m._index_offset
             


https://bitbucket.org/yt_analysis/yt/commits/094bd564f76c/
Changeset:   094bd564f76c
Branch:      yt
User:        jzuhone
Date:        2016-05-23 22:18:17+00:00
Summary:     Merge
Affected #:  3 files

diff -r baa18d26527d02dc9078281c1c5bb42d47546a04 -r 094bd564f76c966e5965443c151e70ad2cb37890 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -57,10 +57,6 @@
         mylog.debug("Detecting fields.")
         self._detect_output_fields()
 
-    def __del__(self):
-        if self._data_file is not None:
-            self._data_file.close()
-
     def _initialize_state_variables(self):
         self._parallel_locking = False
         self._data_file = None

diff -r baa18d26527d02dc9078281c1c5bb42d47546a04 -r 094bd564f76c966e5965443c151e70ad2cb37890 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.on_demand_imports import _h5py as h5py
-from distutils.version import LooseVersion
 
 class HDF5FileHandler(object):
     handle = None

diff -r baa18d26527d02dc9078281c1c5bb42d47546a04 -r 094bd564f76c966e5965443c151e70ad2cb37890 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -10,6 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from pkg_resources import parse_version
+
 class NotAModule(object):
     """
     A class to implement an informative error message that will be outputted if
@@ -28,7 +30,7 @@
     def __call__(self, *args, **kwargs):
         raise self.error
 
-class astropy_imports:
+class astropy_imports(object):
     _name = "astropy"
     _pyfits = None
     @property
@@ -105,7 +107,7 @@
 
 _astropy = astropy_imports()
 
-class scipy_imports:
+class scipy_imports(object):
     _name = "scipy"
     _integrate = None
     @property
@@ -186,12 +188,27 @@
 
 _scipy = scipy_imports()
 
-class h5py_imports:
+class h5py_imports(object):
     _name = "h5py"
+    _err = None
+
+    def __init__(self):
+        try:
+            import h5py
+            if parse_version(h5py.__version__) < parse_version('2.4.0'):
+                self._err = RuntimeError(
+                    'yt requires h5py version 2.4.0 or newer, '
+                    'please update h5py with e.g. "pip install -U h5py" '
+                    'and try again')
+        except ImportError:
+            pass
+        super(h5py_imports, self).__init__()
 
     _File = None
     @property
     def File(self):
+        if self._err:
+            raise self._err
         if self._File is None:
             try:
                 from h5py import File
@@ -203,6 +220,8 @@
     _Group = None
     @property
     def Group(self):
+        if self._err:
+            raise self._err
         if self._Group is None:
             try:
                 from h5py import Group
@@ -214,6 +233,8 @@
     ___version__ = None
     @property
     def __version__(self):
+        if self._err:
+            raise self._err
         if self.___version__ is None:
             try:
                 from h5py import __version__
@@ -225,6 +246,8 @@
     _get_config = None
     @property
     def get_config(self):
+        if self._err:
+            raise self._err
         if self._get_config is None:
             try:
                 from h5py import get_config
@@ -236,6 +259,8 @@
     _h5f = None
     @property
     def h5f(self):
+        if self._err:
+            raise self._err
         if self._h5f is None:
             try:
                 import h5py.h5f as h5f
@@ -247,6 +272,8 @@
     _h5d = None
     @property
     def h5d(self):
+        if self._err:
+            raise self._err
         if self._h5d is None:
             try:
                 import h5py.h5d as h5d
@@ -258,6 +285,8 @@
     _h5s = None
     @property
     def h5s(self):
+        if self._err:
+            raise self._err
         if self._h5s is None:
             try:
                 import h5py.h5s as h5s
@@ -269,6 +298,8 @@
     _version = None
     @property
     def version(self):
+        if self._err:
+            raise self._err
         if self._version is None:
             try:
                 import h5py.version as version
@@ -279,7 +310,7 @@
 
 _h5py = h5py_imports()
 
-class nose_imports:
+class nose_imports(object):
     _name = "nose"
     _run = None
     @property
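
The on_demand_imports change above records a too-old-h5py error at construction time and only raises it when an h5py attribute is actually requested. A stripped-down sketch of that pattern (class and attribute names simplified):

    from pkg_resources import parse_version

    class lazy_h5py(object):
        _File = None
        _err = None

        def __init__(self):
            try:
                import h5py
                if parse_version(h5py.__version__) < parse_version("2.4.0"):
                    self._err = RuntimeError("yt requires h5py 2.4.0 or newer")
            except ImportError:
                pass

        @property
        def File(self):
            # the version error only surfaces when h5py is actually used
            if self._err:
                raise self._err
            if self._File is None:
                from h5py import File
                self._File = File
            return self._File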


https://bitbucket.org/yt_analysis/yt/commits/710e58b48c01/
Changeset:   710e58b48c01
Branch:      yt
User:        jzuhone
Date:        2016-05-24 10:47:11+00:00
Summary:     Merge
Affected #:  2 files

diff -r 094bd564f76c966e5965443c151e70ad2cb37890 -r 710e58b48c014bd370fcd8f3af9096943504e044 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1330,8 +1330,6 @@
 grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
 To load Gizmo datasets using the standard HDF5 output format::
 
-.. code-block:: python
-
    import yt
    ds = yt.load("snapshot_600.hdf5")
 

diff -r 094bd564f76c966e5965443c151e70ad2cb37890 -r 710e58b48c014bd370fcd8f3af9096943504e044 yt/utilities/lib/mesh_triangulation.pyx
--- a/yt/utilities/lib/mesh_triangulation.pyx
+++ b/yt/utilities/lib/mesh_triangulation.pyx
@@ -25,28 +25,34 @@
 @cython.cdivision(True)
 def triangulate_indices(np.ndarray[np.int64_t, ndim=2] indices):
     cdef np.int64_t num_elem = indices.shape[0]
-    cdef np.int64_t num_verts = indices.shape[1]
+    cdef np.int64_t VPE = indices.shape[1]  # num verts per element
+    cdef np.int64_t TPE  # num triangles per element
     cdef int[MAX_NUM_TRI][3] tri_array
-    cdef np.int64_t tri_per_elem
-
-    if (num_verts == 8 or num_verts == 20 or num_verts == 27):
-        tri_per_elem = HEX_NT
+    
+    if (VPE == 8 or VPE == 20 or VPE == 27):
+        TPE = HEX_NT
         tri_array = triangulate_hex
-    elif num_verts == 4:
-        tri_per_elem = TETRA_NT
+    elif VPE == 4:
+        TPE = TETRA_NT
         tri_array = triangulate_tetra
-    elif num_verts == 6:
-        tri_per_elem = WEDGE_NT
+    elif VPE == 6:
+        TPE = WEDGE_NT
         tri_array = triangulate_wedge
     else:
-        raise YTElementTypeNotRecognized(3, num_verts)
-
-    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
-    tri_indices = np.empty((tri_per_elem * num_elem, 3), dtype="int64")
+        raise YTElementTypeNotRecognized(3, VPE)
 
-    cdef np.int64_t i, j
+    cdef np.int64_t num_tri = TPE * num_elem
+        
+    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
+    tri_indices = np.empty((num_tri, 3), dtype="int64")
+    
+    cdef np.int64_t *tri_indices_ptr = <np.int64_t*> tri_indices.data
+    cdef np.int64_t *indices_ptr = <np.int64_t*> indices.data
+
+    cdef np.int64_t i, j, k, offset
     for i in range(num_elem):
-        for j in range(tri_per_elem):
+        for j in range(TPE):
             for k in range(3):
-                tri_indices[i*tri_per_elem + j][k] = indices[i][tri_array[j][k]]
+                offset = tri_array[j][k]
+                tri_indices_ptr[i*TPE*3 + 3*j + k] = indices_ptr[i*VPE + offset]
     return tri_indices
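
The flat-pointer indexing above (i*TPE*3 + 3*j + k into the triangle buffer, i*VPE + offset into the connectivity) amounts to fancy-indexing the connectivity with the per-element triangle table and flattening. A NumPy sketch of that mapping, using the tetrahedron table and invented connectivity values:

    import numpy as np

    # the first TETRA_NT rows of triangulate_tetra from mesh_triangulation.h
    tri_array = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

    # two made-up tetrahedra (num_elem = 2, VPE = 4)
    indices = np.array([[10, 11, 12, 13],
                        [20, 21, 22, 23]], dtype=np.int64)

    # (num_elem, TPE, 3) -> (num_elem * TPE, 3), same ordering as the loop above
    tri_indices = indices[:, tri_array].reshape(-1, 3)
    # tri_indices[0] == [10, 11, 12], tri_indices[4] == [20, 21, 22]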


https://bitbucket.org/yt_analysis/yt/commits/ccbada241125/
Changeset:   ccbada241125
Branch:      yt
User:        jzuhone
Date:        2016-05-24 17:23:42+00:00
Summary:     Bugfix
Affected #:  1 file

diff -r 094bd564f76c966e5965443c151e70ad2cb37890 -r ccbada241125f581c5ae22eed6151825cb6b40dc yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -36,7 +36,7 @@
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):
     unit_system = registry.ds.unit_system
 
-    axis_names = registry.ds.coordinates.axis_order[0]
+    axis_names = registry.ds.coordinates.axis_order
 
     if (ftype,"magnetic_field_%s" % axis_names[0]) not in registry:
         return


https://bitbucket.org/yt_analysis/yt/commits/d366cff1d88b/
Changeset:   d366cff1d88b
Branch:      yt
User:        jzuhone
Date:        2016-05-24 17:23:59+00:00
Summary:     Merge
Affected #:  2 files

diff -r ccbada241125f581c5ae22eed6151825cb6b40dc -r d366cff1d88b389f5030d0372a875f4b608d201f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1330,8 +1330,6 @@
 grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
 To load Gizmo datasets using the standard HDF5 output format::
 
-.. code-block:: python
-
    import yt
    ds = yt.load("snapshot_600.hdf5")
 

diff -r ccbada241125f581c5ae22eed6151825cb6b40dc -r d366cff1d88b389f5030d0372a875f4b608d201f yt/utilities/lib/mesh_triangulation.pyx
--- a/yt/utilities/lib/mesh_triangulation.pyx
+++ b/yt/utilities/lib/mesh_triangulation.pyx
@@ -25,28 +25,34 @@
 @cython.cdivision(True)
 def triangulate_indices(np.ndarray[np.int64_t, ndim=2] indices):
     cdef np.int64_t num_elem = indices.shape[0]
-    cdef np.int64_t num_verts = indices.shape[1]
+    cdef np.int64_t VPE = indices.shape[1]  # num verts per element
+    cdef np.int64_t TPE  # num triangles per element
     cdef int[MAX_NUM_TRI][3] tri_array
-    cdef np.int64_t tri_per_elem
-
-    if (num_verts == 8 or num_verts == 20 or num_verts == 27):
-        tri_per_elem = HEX_NT
+    
+    if (VPE == 8 or VPE == 20 or VPE == 27):
+        TPE = HEX_NT
         tri_array = triangulate_hex
-    elif num_verts == 4:
-        tri_per_elem = TETRA_NT
+    elif VPE == 4:
+        TPE = TETRA_NT
         tri_array = triangulate_tetra
-    elif num_verts == 6:
-        tri_per_elem = WEDGE_NT
+    elif VPE == 6:
+        TPE = WEDGE_NT
         tri_array = triangulate_wedge
     else:
-        raise YTElementTypeNotRecognized(3, num_verts)
-
-    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
-    tri_indices = np.empty((tri_per_elem * num_elem, 3), dtype="int64")
+        raise YTElementTypeNotRecognized(3, VPE)
 
-    cdef np.int64_t i, j
+    cdef np.int64_t num_tri = TPE * num_elem
+        
+    cdef np.ndarray[np.int64_t, ndim=2] tri_indices
+    tri_indices = np.empty((num_tri, 3), dtype="int64")
+    
+    cdef np.int64_t *tri_indices_ptr = <np.int64_t*> tri_indices.data
+    cdef np.int64_t *indices_ptr = <np.int64_t*> indices.data
+
+    cdef np.int64_t i, j, k, offset
     for i in range(num_elem):
-        for j in range(tri_per_elem):
+        for j in range(TPE):
             for k in range(3):
-                tri_indices[i*tri_per_elem + j][k] = indices[i][tri_array[j][k]]
+                offset = tri_array[j][k]
+                tri_indices_ptr[i*TPE*3 + 3*j + k] = indices_ptr[i*VPE + offset]
     return tri_indices


https://bitbucket.org/yt_analysis/yt/commits/c4af69ee0c9c/
Changeset:   c4af69ee0c9c
Branch:      yt
User:        jzuhone
Date:        2016-07-21 20:54:12+00:00
Summary:     Merge
Affected #:  206 files

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,17 +37,22 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
 yt/utilities/lib/mesh_samplers.cpp
 yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_triangulation.c
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/pixelization_routines.c
@@ -60,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -651,7 +651,7 @@
 .. _multiple-PRs:
 
 Working with Multiple BitBucket Pull Requests
-+++++++++++++++++++++++++++++++++++++++++++++
+---------------------------------------------
 
 Once you become active developing for yt, you may be working on
 various aspects of the code or bugfixes at the same time.  Currently,

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c appveyor.yml
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,38 @@
+# AppVeyor.com is a Continuous Integration service to build and run tests under
+# Windows
+
+environment:
+
+  global:
+      PYTHON: "C:\\Miniconda-x64"
+
+  matrix:
+
+      - PYTHON_VERSION: "2.7"
+
+      - PYTHON_VERSION: "3.5"
+
+
+platform:
+    -x64
+
+install:
+    - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+
+    # Install the build and runtime dependencies of the project.
+    # Create a conda environment
+    - "conda create -q --yes -n test python=%PYTHON_VERSION%"
+    - "activate test"
+
+    # Check that we have the expected version of Python
+    - "python --version"
+
+    # Install specified version of numpy and dependencies
+    - "conda install -q --yes numpy nose setuptools ipython Cython sympy h5py matplotlib"
+    - "python setup.py develop"
+
+# Not a .NET project
+build: false
+
+test_script:
+  - "nosetests -e test_all_fields ."

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/helper_scripts/generate_doap.py
--- /dev/null
+++ b/doc/helper_scripts/generate_doap.py
@@ -0,0 +1,141 @@
+import os
+import hglib
+import pkg_resources
+from email.utils import parseaddr
+
+templates = {"header": r"""<Project xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns="http://usefulinc.com/ns/doap#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:admin="http://webns.net/mvcb/">
+ <name>The yt Project</name>
+ <shortname>yt</shortname>
+ <shortdesc>Multi-resolution volumetric analysis</shortdesc>
+ <description>yt is a python package for analyzing and visualizing volumetric, multi-resolution data.  Originally developed for astrophysical simulations, it is flexible enough to work with data from other domains such as weather, nuclear engineering, seismology and molecular dynamics.</description>
+ <homepage rdf:resource="http://yt-project.org/" />
+ <download-page rdf:resource="https://pypi.python.org/pypi/yt/" />
+ <download-mirror rdf:resource="http://bitbucket.org/yt_analysis/yt/" />
+ <bug-database rdf:resource="http://bitbucket.org/yt_analysis/yt/issues" />
+ <programming-language>python</programming-language>
+ <programming-language>cython</programming-language>
+ <license rdf:resource="http://usefulinc.com/doap/licenses/bsd" />
+ """,
+ "foaf": r"""<foaf:Person>
+     <foaf:name>%(realname)s</foaf:name>
+  </foaf:Person>
+  """,
+  "release": r"""
+	<release>
+		<Version>
+			<name>%(name)s</name>
+			<created>%(date)s</created>
+			<revision>%(revision)s</revision>
+		</Version>
+	</release>
+   """,
+  "footer": r"""
+ <repository> 
+   <HgRepository>
+     <browse rdf:resource='https://bitbucket.org/yt_analysis/yt/src' />
+     <location rdf:resource='https://bitbucket.org/yt_analysis/yt' />
+   </HgRepository>
+ </repository> 
+</Project>
+"""
+}
+
+known_releases = [
+    ("0.3"  , "2007-12-17"),
+    ("1.0.1", "2008-10-25"),
+    ("1.5"  , "2009-11-04"),
+    ("1.6"  , "2010-01-22"),
+    ("1.6.1", "2010-02-11"),
+    ("1.7"  , "2010-06-27"),
+]
+
+yt_provider = pkg_resources.get_provider("yt")
+yt_path = os.path.dirname(yt_provider.module_path)
+
+name_mappings = {
+        # Sometimes things get filtered out by hgchurn pointing elsewhere.
+        # So we can add them back in manually
+        "andrew.wetzel at yale.edu" : "Andrew Wetzel",
+        "df11c at my.fsu.edu": "Daniel Fenn",
+        "dnarayan at haverford.edu": "Desika Narayanan",
+        "jmtomlinson95 at gmail.com": "Joseph Tomlinson",
+        "kaylea.nelson at yale.edu": "Kaylea Nelson",
+        "tabel at slac.stanford.edu": "Tom Abel",
+        "pshriwise": "Patrick Shriwise",
+        "jnaiman": "Jill Naiman",
+        "gsiisg": "Geoffrey So",
+        "dcollins4096 at gmail.com": "David Collins",
+        "bcrosby": "Brian Crosby",
+        "astrugarek": "Antoine Strugarek",
+        "AJ": "Allyson Julian",
+}
+
+name_ignores = ["convert-repo"]
+
+lastname_sort = lambda a: a.rsplit(None, 1)[-1]
+
+def get_release_tags():
+    c = hglib.open(yt_path)
+    releases = {}
+    for name, rev, node, islocal in c.tags():
+        if name.startswith("yt-"):
+            releases[name] = node
+    rr = []
+    for name, node in sorted(releases.items()):
+        date = c.log(node)[-1][-1]
+        rr.append((date, name[3:]))
+    rr.sort()
+    return [(_[1], _[0].strftime("%Y-%M-%d")) for _ in rr]
+
+def developer_names():
+    cmd = hglib.util.cmdbuilder("churn", "-c")
+    c = hglib.open(yt_path)
+    emails = set([])
+    for dev in c.rawcommand(cmd).split("\n"):
+        if len(dev.strip()) == 0: continue
+        emails.add(dev.rsplit(None, 2)[0])
+    print("Generating real names for {0} emails".format(len(emails)))
+    names = set([])
+    for email in sorted(emails):
+        if email in name_ignores:
+            continue
+        if email in name_mappings:
+            names.add(name_mappings[email])
+            continue
+        cset = c.log(revrange="last(author('%s'))" % email)
+        if len(cset) == 0:
+            print("Error finding {0}".format(email))
+            realname = email
+        else:
+            realname, addr = parseaddr(cset[0][4])
+        if realname == '':
+            realname = email
+        if realname in name_mappings:
+            names.add(name_mappings[realname])
+            continue
+        realname = realname.decode('utf-8')
+        realname = realname.encode('ascii', 'xmlcharrefreplace')
+        names.add(realname)
+    #with open("devs.txt", "w") as f:
+    #    for name in sorted(names, key=lastname_sort):
+    #        f.write("%s\n" % name)
+    devs = list(names)
+    devs.sort(key=lastname_sort)
+    return devs
+
+def generate_doap():
+    dev_names = developer_names()
+    with open("doap.xml", "w") as f:
+        f.write(templates["header"])
+        for dev_name in dev_names:
+            f.write("<developer>\n")
+            f.write(templates["foaf"] % {'realname': dev_name})
+            f.write("</developer>\n")
+        for release in known_releases + get_release_tags():
+            f.write(templates["release"] % {
+                'name': "yt " + release[0], 'revision': release[0], 'date': release[1]}
+            )
+        f.write(templates["footer"])
+
+if __name__ == "__main__":
+    generate_doap()

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -119,6 +119,24 @@
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
+    if [ -n "${PYTHONPATH}" ]
+    then
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        echo
+        echo "The PYTHONPATH environment variable is set to:"
+        echo
+        echo "    $PYTHONPATH"
+        echo
+        echo "If dependencies of yt (numpy, scipy, matplotlib) are installed"
+        echo "to this path, this may cause issues. Exit the install script"
+        echo "with Ctrl-C and unset PYTHONPATH if you are unsure."
+        echo "Hit enter to continue."
+        echo
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        read -p "[hit enter]"
+    fi
 else
     if [ $INST_YT_SOURCE -eq 0 ]
     then
@@ -524,11 +542,11 @@
 echo
 
 printf "%-18s = %s so I " "INST_CONDA" "${INST_CONDA}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_CONDA}
 echo "be installing a conda-based python environment"
 
 printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_YT_SOURCE}
 echo "be compiling yt from source"
 
 printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
@@ -704,7 +722,7 @@
 if type -P curl &>/dev/null
 then
     echo "Using curl"
-    export GETFILE="curl -sSO"
+    export GETFILE="curl -sSOL"
 else
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -744,6 +762,12 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function test_install
+{
+    echo "Testing that yt can be imported"
+    ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import yt" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -1238,6 +1262,8 @@
     ( cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh 2>&1 ) 1>> ${LOG_FILE}
     sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
 
+    test_install
+
     function print_afterword
     {
         echo
@@ -1478,10 +1504,12 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE}
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 
+    test_install
+
     echo
     echo
     echo "========================================================================"

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/_static/yt_icon.png
Binary file doc/source/_static/yt_icon.png has changed

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
-   halo_transition
    halo_catalogs
-   halo_finders
    halo_mass_function
+   halo_transition
    halo_merger_tree
+   ellipsoid_analysis

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
-
-Creating Halo Catalogs
-----------------------
+Halo Finding and Analysis
+=========================
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
 For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+to yt-3.x, see :ref:`halo-transition`.
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+.. _halo_catalog_finding:
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+Halo Finding
+------------
+
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has its own set of keyword
+arguments to control functionality.  These can be specified in the form
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied overdensity
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
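+As an illustration, the overdensity threshold can be adjusted through
+``finder_kwargs`` when creating the catalog.  This is only a minimal sketch;
+``threshold`` is assumed here to be the keyword accepted by
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
+.. code-block:: python
+
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   # raise the overdensity threshold above the default of 160.0
+   hc = HaloCatalog(data_ds=ds, finder_method='hop',
+                    finder_kwargs={"threshold": 240.0})
+   hc.create()
+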
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and
+noise-resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos based on stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e., zoom-in grid-based simulations), one can set up a particle
+filter to select the lowest-mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
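+A heavily simplified sketch of such a filter is given below.  The filter name
+and the mass cut are illustrative only, and the exact ``finder_kwargs`` entry
+needed to hand the filtered particle type to Rockstar is shown in the cookbook
+recipe linked above.
+
+.. code-block:: python
+
+   import yt
+   from yt.data_objects.particle_filters import add_particle_filter
+
+   def max_res_dark_matter(pfilter, data):
+       # keep only particles at (or very near) the minimum particle mass
+       min_mass = data[(pfilter.filtered_type, "particle_mass")].min()
+       return data[(pfilter.filtered_type, "particle_mass")] <= 1.01 * min_mass
+
+   add_particle_filter("max_res_dark_matter", function=max_res_dark_matter,
+                       filtered_type="all", requires=["particle_mass"])
+
+   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   ds.add_particle_filter("max_res_dark_matter")
+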
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+When run, such a script configures the halo finder and launches a server
+process that disseminates run information and coordinates the reader and
+writer processes.  It then launches the reader and writer tasks, filling the
+available MPI slots; these alternately read particle information and analyze
+it for halo content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, the directory in which the out*list files that Rockstar makes
+  should be placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time). Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
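+Putting a few of these options together, a Rockstar run might be configured
+as follows.  This is a minimal sketch using only the keywords listed above;
+adjust the reader and writer counts to your own MPI layout and launch the
+script with ``mpirun`` as shown in the warning above.
+
+.. code-block:: python
+
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   yt.enable_parallelism()
+
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
+                    finder_kwargs={"num_readers": 1,
+                                   "num_writers": 22,
+                                   "outbase": "rockstar_halos"})
+   hc.create()
+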
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  # or your favorite editor to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded into yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel with separate processors or groups of processors
+being allocated to perform the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
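+
+For example, continuing with the ``hc`` object created above, a short pipeline
+might filter on halo mass, attach a sphere to each remaining halo, and then
+create the catalog.  This is only a sketch; the filter and callback names used
+here are documented in the reference linked below.
+
+.. code-block:: python
+
+   # keep only halos above 1e14 Msun, then attach a sphere to each halo
+   hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
+   hc.add_callback("sphere", factor=2.0)
+   hc.create()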
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally Rockstar (`Behroozi et a.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D-phase space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substrcture, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied over density
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (ie: zoom-in grid based simulations), one can set up a particle
-filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run the Rockstar Halo finding, you must launch python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-The script above configures the Halo finder, launches a server process which
-disseminates run information and coordinates writer-reader processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument:
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, This is where the out*list files that Rockstar makes should be
-  placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time.) Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar, however
-  this option can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below in the first plot (i) is a simplified
-depiction of three haloes labeled 1,2 and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the finder_kwargs argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter is how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, heres a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -14,14 +14,126 @@
 out of favor, as these discrete fields can be any type of sparsely populated
 data.
 
+What are fields?
+----------------
+
+Fields in yt are denoted by a two-element tuple, of the form ``(field_type,
+field_name)``. The first element, the "field type", is a category for a
+field. Possible field types used in yt include *gas* (for fluid fields
+defined on a mesh) or *io* (for fields defined at particle locations). Field
+types can also correspond to distinct particle or fluid types in a single
+simulation. For example, a plasma physics simulation using the Particle in Cell
+method might have particle types corresponding to *electrons* and *ions*. See
+:ref:`known-field-types` below for more info about field types in yt.
+
+The second element of field tuples, the "field name", denotes the specific field
+to select, given the field type. Possible field names include *density*,
+*velocity_x* or *pressure* --- these three fields are examples of field names
+that might be used for a fluid defined on a mesh. Examples of particle fields
+include *particle_mass*, *particle_position*, or *particle_velocity_x*. In
+general, particle field names are prefixed by "particle\_", which makes it easy
+to distinguish between a particle field and a mesh field when no field type is
+provided.
+
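+For example, a mesh field and a particle field might be written as the
+following tuples (the specific names are only illustrative):
+
+.. code-block:: python
+
+   mesh_field = ("gas", "density")           # a fluid field defined on the mesh
+   particle_field = ("io", "particle_mass")  # a field defined at particle locations
+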
+What fields are available?
+--------------------------
+
+We provide a full list of fields that yt recognizes by default at
+:ref:`field-list`.  If you want to create additional custom derived fields,
+see :ref:`creating-derived-fields`.
+
+Every dataset has an attribute, ``ds.fields``.  This attribute possesses
+attributes itself, each of which is a "field type," and each field type has as
+its attributes the fields themselves.  When one of these is printed, it returns
+information about the field and things like units and so on.  You can use this
+for tab-completing as well as easier access to information.
+
+As an example, you might browse the available fields like so:
+
+.. code-block:: python
+
+  print(dir(ds.fields))
+  print(dir(ds.fields.gas))
+  print(ds.fields.gas.density)
+
+On an Enzo dataset, the result from the final command would look something like
+this::
+
+  Alias Field for "('enzo', 'Density')" (gas, density): (units: g/cm**3)
+
+You can use this to easily explore available fields, particularly through
+tab-completion in Jupyter/IPython.
+
+For a more programmatic method of accessing fields, you can utilize the
+``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
+information about fields.  The full list of fields available for a dataset can
+be found as the attribute ``field_list`` for native, on-disk fields and
+``derived_field_list`` for derived fields (``derived_field_list`` is a superset
+of ``field_list``).  You can view these lists by examining a dataset like this:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print(ds.field_list)
+   print(ds.derived_field_list)
+
+By using the ``field_info()`` class, one can access information about a given
+field, like its default units or the source code for it.
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   ds.index
+   print(ds.field_info["gas", "pressure"].get_units())
+   print(ds.field_info["gas", "pressure"].get_source())
+
+Using fields to access data
+---------------------------
+
+The primary *use* of fields in yt is to access data from a dataset. For example,
+if I want to use a data object (see :ref:`Data-objects` for more detail about
+data objects) to access the ``('gas', 'density')`` field, one can do any of the
+following:
+
+.. code-block:: python
+
+    ad = ds.all_data()
+
+    # just a field name
+    density = ad['density']
+
+    # field tuple with no parentheses
+    density = ad['gas', 'density']
+
+    # full field tuple
+    density = ad[('gas', 'density')]
+
+    # through the ds.fields object
+    density = ad[ds.fields.gas.density]
+
+The first data access example is the simplest. In that example, the field type
+is inferred from the name of the field. The next two examples use the field type
+explicitly; this might be necessary if there is more than one field type with a
+"density" field defined in the same dataset. The third example is slightly more
+verbose but is syntactically identical to the second example due to the way
+indexing works in the Python language.
+
+The final example uses the ``ds.fields`` object described above. This way of
+accessing fields lends itself to interactive use, especially if you make heavy
+use of IPython's tab completion features. Any of these ways of denoting the
+``('gas', 'density')`` field can be used when supplying a field name to a yt
+data object, analysis routine, or plotting and visualization function.
+
+Accessing Fields without a Field Type
+-------------------------------------
+
 In previous versions of yt, there was a single mechanism of accessing fields on
-a data container -- by their name, which was mandated to be a single string,
-and which often varied between different code frontends.  yt 3.0 allows
-for datasets containing multiple different types of fluid fields, mesh fields,
-particles (with overlapping or disjoint lists of fields).  To enable accessing
-these fields in a meaningful, simple way, the mechanism for accessing them has
-changed to take an optional *field type* in addition to the *field name* of
-the form ('*field type*', '*field name*').
+a data container -- by their name, which was mandated to be a single string, and
+which often varied between different code frontends.  yt 3.0 allows for datasets
+containing multiple different types of fluid fields, mesh fields, particles
+(with overlapping or disjoint lists of fields). However, to preserve backward
+compatibility and make interactive use simpler, yt will still accept field names
+given as a string and will try to infer the field type given a field name.
 
 As an example, we may be in a situation where have multiple types of particles
 which possess the ``particle_position`` field.  In the case where a data
@@ -30,9 +142,9 @@
 
 .. code-block:: python
 
-   print(ad["humans", "particle_position"])
-   print(ad["dogs", "particle_position"])
-   print(ad["dinosaurs", "particle_position"])
+   print(ad["dark_matter", "particle_position"])
+   print(ad["stars", "particle_position"])
+   print(ad["black_holes", "particle_position"])
 
 Each of these three fields may have different sizes.  In order to enable
 falling back on asking only for a field by the name, yt will use the most
@@ -45,7 +157,8 @@
 
    print(ad["particle_velocity"])
 
-it would select ``dinosaurs`` as the field type.
+it would select ``black_holes`` as the field type, since the last field accessed
+used that field type.
 
 The same operations work for fluid and mesh fields.  As an example, in some
 cosmology simulations, we may want to examine the mass of particles in a region
@@ -189,58 +302,6 @@
  * Species fields, such as for chemistry species (yt can recognize the entire
    periodic table in field names and construct ionization fields as need be)
 
-What fields are available?
---------------------------
-
-We provide a full list of fields that yt recognizes by default at
-:ref:`field-list`.  If you want to create additional custom derived fields,
-see :ref:`creating-derived-fields`.
-
-Every dataset has an attribute, ``ds.fields``.  This attribute possesses
-attributes itself, each of which is a "field type," and each field type has as
-its attributes the fields themselves.  When one of these is printed, it returns
-information about the field and things like units and so on.  You can use this
-for tab-completing as well as easier access to information.
-
-As an example, you might browse the available fields like so:
-
-.. code-block:: python
-
-  print(dir(ds.fields))
-  print(dir(ds.fields.gas))
-  print(ds.fields.gas.density)
-
-On an Enzo dataset, the result from the final command would look something like
-this:::
-
-  Alias Field for "('enzo', 'Density')" (gas, density): (units: g/cm**3)
-
-You can use this to easily explore available fields, particularly through
-tab-completion in Jupyter/IPython.
-
-For a more programmatic method of accessing fields, you can utilize the
-``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
-information about fields.  The full list of fields available for a dataset can
-be found as the attribute ``field_list`` for native, on-disk fields and
-``derived_field_list`` for derived fields (``derived_field_list`` is a superset
-of ``field_list``).  You can view these lists by examining a dataset like this:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print(ds.field_list)
-   print(ds.derived_field_list)
-
-By using the ``field_info()`` class, one can access information about a given
-field, like its default units or the source code for it.
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   ds.index
-   print(ds.field_info["gas", "pressure"].get_units())
-   print(ds.field_info["gas", "pressure"].get_source())
-
 .. _bfields:
 
 Magnetic Fields

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
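+A selection made this way behaves like any other data object.  As a minimal
+sketch (the dataset name here is only illustrative):
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   # the central box from 0.4 to 0.6 of the domain along each axis
+   box = ds.r[0.4:0.6]
+   print(box["gas", "density"].min())
+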
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,7 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'numpydoc', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/colormaps.py
--- a/doc/source/cookbook/colormaps.py
+++ b/doc/source/cookbook/colormaps.py
@@ -7,11 +7,11 @@
 p = yt.ProjectionPlot(ds, "z", "density", width=(100, 'kpc'))
 p.save()
 
-# Change the colormap to 'jet' and save again.  We must specify
+# Change the colormap to 'dusk' and save again.  We must specify
 # a different filename here or it will save it over the top of
 # our first projection.
-p.set_cmap(field="density", cmap='jet')
-p.save('proj_with_jet_cmap.png')
+p.set_cmap(field="density", cmap='dusk')
+p.save('proj_with_dusk_cmap.png')
 
 # Change the colormap to 'hot' and save again.
 p.set_cmap(field="density", cmap='hot')

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
-
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
+
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -85,6 +85,7 @@
 cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
+# Recommended resolution for YouTube 360-degree videos is [3840, 2160]
 cam.resolution = [500, 250]
 # Standing at (x=0.4, y=0.5, z=0.5), we look in all the radial directions
 # from this point in spherical coordinate.
@@ -99,9 +100,11 @@
 
 # Stereo-spherical lens
 cam = sc.add_camera(ds, lens_type='stereo-spherical')
-# Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
-# will generate the final image with both left-eye and right-eye ones jointed together.
-cam.resolution = [1000, 250]
+# Set the size ratio of the final projection to be 1:1, since the stereo-spherical
+# lens will generate the final image with the left-eye and right-eye images joined
+# together, with the left-eye image on top and the right-eye image on bottom.
+# Recommended resolution for YouTube virtual reality videos is [3840, 2160]
+cam.resolution = [500, 500]
 cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
 cam.switch_orientation(normal_vector=normal_vector,
                        north_vector=north_vector)

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
-
-import numpy as np
-import pylab
-
 import yt
-import yt.visualization.volume_rendering.old_camera as vr
-
-ds = yt.load("maestro_subCh_plt00248")
-
-dd = ds.all_data()
-
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
-
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
-
-mi, ma = min(vals), max(vals)
-
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
 
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
+sc = yt.create_scene(ds, lens_type='perspective')
 
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
+source = sc[0]
 
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
+source.set_field('density')
+source.set_log(True)
 
-# tell the camera to use our figure
-cam._render_figure = f
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
+
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
+
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/developing/extensions.rst
--- /dev/null
+++ b/doc/source/developing/extensions.rst
@@ -0,0 +1,54 @@
+.. _extensions:
+
+Extension Packages
+==================
+
+.. note:: For some additional discussion, see `YTEP-0029
+          <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
+          this plan was designed.
+
+As of version 3.3 of yt, we have put into place new methods for easing the
+process of developing "extensions" to yt.  Extensions might be analysis
+packages, visualization tools, or other software projects that use yt as a base
+engine but that are versioned, developed, and distributed separately.  This
+brings with it the advantage of retaining control over the extension's
+versioning, contribution guidelines, scope, and so on, while also providing a
+mechanism for disseminating information about it and, potentially, a way of
+interacting with other extensions.
+
+We have created a few pieces of infrastructure for developing extensions,
+making them discoverable, and distributing them to collaborators.
+
+If you have a module you would like to retain some external control over, or
+that you don't feel would fit into yt, we encourage you to build it as an
+extension module and distribute and version it independently.
+
+Hooks for Extensions
+--------------------
+
+Starting with version 3.3 of yt, any package named with the prefix ``yt_`` is
+importable from the namespace ``yt.extensions``.  For instance, the
+``yt_interaction`` package ( https://bitbucket.org/data-exp-lab/yt_interaction
+) is importable as ``yt.extensions.interaction``.
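+
+As a minimal sketch of the hook in action (the package name ``yt_my_tools``
+and the helper it exposes are hypothetical, shown only to illustrate the
+naming convention)::
+
+    # Assuming a package named ``yt_my_tools`` is installed on the Python
+    # path, yt makes it reachable under the yt.extensions namespace.
+    import yt
+    import yt.extensions.my_tools as my_tools  # resolves to yt_my_tools
+
+    ds = yt.load("output_0001")  # hypothetical dataset path
+    my_tools.analyze(ds)         # hypothetical function in the extension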
+
+In subsequent versions, we plan to include in yt a catalog of known extensions
+and where to find them; this will put discoverability directly into the code
+base.
+
+Extension Template
+------------------
+
+A template for starting an extension module (or converting an existing set of
+code to an extension module) can be found at
+https://bitbucket.org/yt_analysis/yt_extension_template .
+
+To get started, download a zipfile of the template (
+https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+follow the directions in ``README.md`` to modify the metadata.
+
+Distributing Extensions
+-----------------------
+
+We encourage you to version your extension on your choice of hosting platform
+(Bitbucket, GitHub, etc.) and to distribute it widely.  We are presently
+working on deploying a method for listing extension modules on the yt webpage.

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -19,6 +19,7 @@
    developing
    building_the_docs
    testing
+   extensions
    debugdrive
    releasing
    creating_datatypes

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -95,17 +95,17 @@
 To create new unit tests:
 
 #. Create a new ``tests/`` directory next to the file containing the
-   functionality you want to test.  Be sure to add this new directory as a
-   subpackage in the setup.py script located in the directory you're adding a
-   new ``tests/`` folder to.  This ensures that the tests will be deployed in
-   yt source and binary distributions.
+   functionality you want to test and add an empty ``__init__.py`` file to
+   it.
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a tuple of the form
-   ``function``, ``argument_one``, ``argument_two``, etc.  For example
-   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
-   asserts that 1.0 is equal to 1.0.
+   accept no arguments. The test function should exercise the functionality
+   being tested and verify that the results are correct using assert
+   statements or functions (see the sketch after this list).
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+   ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
+   captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
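+
+As a minimal sketch of what such a test file might look like (the module
+location and the quantity being checked are hypothetical; ``fake_random_ds``
+and ``assert_equal`` come from ``yt.testing``)::
+
+    # yt/some_module/tests/test_some_module.py (hypothetical path)
+    from yt.testing import fake_random_ds, assert_equal
+
+    def test_cell_count():
+        # Repeat the check for several domain decompositions so that
+        # chunked data access is exercised as well.
+        for nprocs in [1, 2, 4, 8]:
+            ds = fake_random_ds(16, nprocs=nprocs)
+            dd = ds.all_data()
+            # A 16^3 fake dataset should always contain 16**3 cells,
+            # regardless of how it is decomposed.
+            yield assert_equal, dd["ones"].size, 16**3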
@@ -470,9 +470,9 @@
        test.prefix = "my_unique_name"
 
        # this ensures a nice test name in nose's output
-       test_my_ds.__description__ = test.description
+       test_my_ds.__name__ = test.description
 
-       yield test_my_ds
+       yield test
 
 Another good example of an image comparison test is the
 ``PlotWindowAttributeTest`` defined in the answer testing framework and used in
@@ -487,7 +487,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require a definition of which set of tests constitutes a given
 answer. Configuration for the integration server is stored in

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/e47e12f72f1b/
Changeset:   e47e12f72f1b
Branch:      yt
User:        jzuhone
Date:        2016-07-22 20:37:11+00:00
Summary:     Merge
Affected #:  7 files

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f optional-requirements.txt
--- a/optional-requirements.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-ipython[notebook]
\ No newline at end of file

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f requirements.txt
--- a/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-numpy==1.9.2 
-matplotlib==1.4.3 
-Cython==0.22 
-h5py==2.5.0 
-nose==1.3.6 
-sympy==0.7.6 
-

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_002:
+  local_varia_003:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -206,7 +206,7 @@
             dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
-                if self.data_series[0].field_info[fd].particle_type:
+                if self.data_series[0]._get_field_info(*fd).particle_type:
                     self.particle_fields.append(field)
             particles = np.empty((self.num_indices,self.num_steps))
             particles[:] = np.nan
@@ -339,7 +339,7 @@
         """
         fid = h5py.File(filename, "w")
         fields = [field for field in sorted(self.field_data.keys())]
-        fid.create_dataset("particle_indices", dtype=np.int32,
+        fid.create_dataset("particle_indices", dtype=np.int64,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         for field in fields:

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -288,7 +288,7 @@
 
         tmpspec += np.interp(emid, e_pseudo*self.scale_factor, pseudo)*de/self.scale_factor
 
-        return tmpspec
+        return tmpspec*self.scale_factor
 
     def get_spectrum(self, kT):
         """

diff -r c4af69ee0c9cf00771ae8e3fcec61996a3345d5c -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -106,23 +106,19 @@
     convert_old_file(old_photon_file, "converted_photons.h5")
     convert_old_file(old_event_file, "converted_events.h5")
 
-    photons3 = PhotonList.from_file("converted_photons.h5")
-    events3 = EventList.from_h5_file("converted_events.h5")
+    PhotonList.from_file("converted_photons.h5")
+    EventList.from_h5_file("converted_events.h5")
 
     for k in photons1.keys():
         if k == "Energy":
             arr1 = uconcatenate(photons1[k])
             arr2 = uconcatenate(photons2[k])
-            arr3 = uconcatenate(photons3[k])
         else:
             arr1 = photons1[k]
             arr2 = photons2[k]
-            arr3 = photons3[k]
         assert_almost_equal(arr1, arr2)
-        assert_almost_equal(arr1, arr3)
     for k in events1.keys():
         assert_almost_equal(events1[k], events2[k])
-        assert_almost_equal(events1[k], events3[k])
 
     nevents = 0
 


https://bitbucket.org/yt_analysis/yt/commits/fcee11d31456/
Changeset:   fcee11d31456
Branch:      yt
User:        jzuhone
Date:        2016-07-25 04:02:49+00:00
Summary:     Merge
Affected #:  82 files

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1489,7 +1489,7 @@
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,9 +67,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the 
+# simulation box.  Alternatively, a trajectory can
+# be given as (r, theta, phi).
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@
 SHADERS_FILES = glob.glob(os.path.join(SHADERS_DIR, "*.vertexshader")) + \
     glob.glob(os.path.join(SHADERS_DIR, "*.fragmentshader"))
 
-VERSION = "3.3.dev0"
+VERSION = "3.4.dev0"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,9 +67,11 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_000:
+  local_absorption_spectrum_001:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
 
 other_tests:
   unittests:

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -72,7 +72,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-__version__ = "3.3-dev"
+__version__ = "3.4-dev"
 
 # First module imports
 import numpy as np # For modern purposes

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -26,17 +26,21 @@
 import shutil
 from yt.utilities.on_demand_imports import \
     _h5py as h5
+from yt.convenience import load
 
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
+GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
 
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
 def test_absorption_spectrum_cosmo():
     """
-    This test generates an absorption spectrum from a cosmological light ray
+    This test generates an absorption spectrum from a compound light ray on a
+    grid dataset
     """
     # Set up in a temp dir
     tmpdir = tempfile.mkdtemp()
@@ -92,7 +96,8 @@
 @requires_answer_testing()
 def test_absorption_spectrum_non_cosmo():
     """
-    This test generates an absorption spectrum from a non-cosmological light ray
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
     """
 
     # Set up in a temp dir
@@ -244,3 +249,114 @@
     a = 1.7e-4
     x = np.linspace(5.0, -3.6, 60)
     yield assert_allclose_units, voigt_old(a, x), voigt_scipy(a, x), 1e-8
+
+@requires_file(GIZMO_PLUS)
+@requires_answer_testing()
+def test_absorption_spectrum_cosmo_sph():
+    """
+    This test generates an absorption spectrum from a compound light ray on a
+    particle dataset
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01)
+
+    lr.make_light_ray(seed=1234567,
+                      fields=[('gas', 'temperature'), 
+                              ('gas', 'H_number_density')],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
+
+    my_label = 'HI Lya'
+    field = ('gas', 'H_number_density')
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'HI Lya'
+    field = ('gas', 'H_number_density')
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_cosmo_sph".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_cosmo_sph.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(GIZMO_PLUS_SINGLE)
+@requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_sph():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    particle dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(GIZMO_PLUS_SINGLE)
+    lr = LightRay(ds)
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=[('gas', 'temperature'), 
+                              ('gas', 'H_number_density')],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = ('gas', 'H_number_density')
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_sph".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_sph.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,6 +21,8 @@
     load
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
+from yt.units.unit_object import \
+    Unit
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
@@ -603,10 +605,18 @@
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
         # Only return LightRay elements with non-zero density
-        if 'density' in data:
-            mask = data['density'] > 0
-            for key in data.keys():
-                data[key] = data[key][mask]
+        mask_field_units = ['K', 'cm**-3', 'g/cm**3']
+        mask_field_units = [Unit(u) for u in mask_field_units]
+        for f in data:
+            for u in mask_field_units:
+                if data[f].units.same_dimensions_as(u):
+                    mask = data[f] > 0
+                    if not np.any(mask):
+                        raise RuntimeError(
+                            "No zones along light ray with nonzero %s. "
+                            "Please modify your light ray trajectory." % (f,))
+                    for key in data.keys():
+                        data[key] = data[key][mask]
         save_as_dataset(ds, filename, data, field_types=field_types,
                         extra_attrs=extra_attrs)
 

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -97,7 +97,11 @@
                     dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
             self.length_unit = self.quan(self.unit_base["UnitLength_in_cm"],
                                          "cmcm / h", registry=self.unit_registry)
-            self.box_size *= self.length_unit.in_units("Mpccm / h")
+            self.mass_unit = self.quan(self.unit_base["UnitMass_in_g"],
+                                         "g / h", registry=self.unit_registry)
+            self.box_size = self.box_size * self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             # Read time from file for non-cosmological sim
             self.time_unit = self.quan(
@@ -322,6 +326,9 @@
 
             self.parameters[param] = vals
 
+        # Domain dimensions for Gadget datasets are always 2x2x2 for octree
+        self.domain_dimensions = np.array([2,2,2])
+
         if self.parameters["ComovingIntegrationOn"]:
             cosmo_attr = {"box_size": "BoxSize",
                           "omega_lambda": "OmegaLambda",
@@ -334,22 +341,36 @@
                 if v not in self.parameters:
                     raise MissingParameter(self.parameter_filename, v)
                 setattr(self, a, self.parameters[v])
+            self.domain_left_edge = np.array([0., 0., 0.])
+            self.domain_right_edge = np.array([1., 1., 1.]) * self.parameters['BoxSize']
         else:
             self.cosmological_simulation = 0
             self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = 0.0
 
+    def _find_data_dir(self):
+        """
+        Find proper location for datasets.  First look where parameter file
+        points, but if this doesn't exist then default to the current 
+        directory.
+        """
+        if self.parameters["OutputDir"].startswith("/"):
+            data_dir = self.parameters["OutputDir"]
+        else:
+            data_dir = os.path.join(self.directory,
+                                    self.parameters["OutputDir"])
+        if not os.path.exists(data_dir):
+            mylog.info("OutputDir not found at %s, instead using %s." % 
+                       (data_dir, self.directory))
+            data_dir = self.directory
+        self.data_dir = data_dir
+
     def _snapshot_format(self, index=None):
         """
         The snapshot filename for a given index.  Modify this for different 
         naming conventions.
         """
 
-        if self.parameters["OutputDir"].startswith("/"):
-            data_dir = self.parameters["OutputDir"]
-        else:
-            data_dir = os.path.join(self.directory,
-                                    self.parameters["OutputDir"])
         if self.parameters["NumFilesPerSnapshot"] > 1:
             suffix = ".0"
         else:
@@ -362,20 +383,25 @@
             count = "%03d" % index
         filename = "%s_%s%s" % (self.parameters["SnapshotFileBase"],
                                 count, suffix)
-        return os.path.join(data_dir, filename)
+        return os.path.join(self.data_dir, filename)
                 
     def _get_all_outputs(self, find_outputs=False):
         """
         Get all potential datasets and combine into a time-sorted list.
         """
 
+        # Find the data directory where the outputs are
+        self._find_data_dir()
+
         # Create the set of outputs from which further selection will be done.
         if find_outputs:
             self._find_outputs()
         else:
             if self.parameters["OutputListOn"]:
                 a_values = [float(a) for a in 
-                            open(self.parameters["OutputListFilename"], "r").readlines()]
+                            open(os.path.join(self.data_dir, 
+                                 self.parameters["OutputListFilename"]), 
+                            "r").readlines()]
             else:
                 a_values = [float(self.parameters["TimeOfFirstSnapshot"])]
                 time_max = float(self.parameters["TimeMax"])
@@ -437,7 +463,6 @@
         Search for directories matching the data dump keywords.
         If found, get dataset times py opening the ds.
         """
-
         potential_outputs = glob.glob(self._snapshot_format())
         self.all_outputs = self._check_for_outputs(potential_outputs)
         self.all_outputs.sort(key=lambda obj: obj["time"])

diff -r e47e12f72f1b435f7f33345b8a4397159e9f5d5f -r fcee11d314565cba204d22e727bf3b4f1748c43a yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -196,8 +196,10 @@
 
         # Some data containers can't be reconstructed in the same way
         # since this is now particle-like data.
-        if self.parameters["container_type"] in \
-          ["cutting", "proj", "ray", "slice"]:
+        data_type = self.parameters["data_type"]
+        container_type = self.parameters["container_type"]
+        ex_container_type = ["cutting", "proj", "ray", "slice"]
+        if data_type == "yt_light_ray" or container_type in ex_container_type:
             mylog.info("Returning an all_data data container.")
             return self.all_data()
 


https://bitbucket.org/yt_analysis/yt/commits/6aed2c7ac9a0/
Changeset:   6aed2c7ac9a0
Branch:      yt
User:        jzuhone
Date:        2016-07-25 04:39:37+00:00
Summary:     We don't have children or parents in Athena++
Affected #:  1 file

diff -r d366cff1d88b389f5030d0372a875f4b608d201f -r 6aed2c7ac9a03e4a388f93f59fa0ed64d4952065 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -146,7 +146,7 @@
     def __init__(self, id, index, level):
         AMRGridPatch.__init__(self, id, filename = index.index_filename,
                               index = index)
-        self.Parent = []
+        self.Parent = None
         self.Children = []
         self.Level = level
 
@@ -154,12 +154,9 @@
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.ds.refine_by
-        else:
-            LE, RE = self.index.grid_left_edge[id,:], \
-                     self.index.grid_right_edge[id,:]
-            self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
+        LE, RE = self.index.grid_left_edge[id,:], \
+            self.index.grid_right_edge[id,:]
+        self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
         if self.ds.dimensionality < 2: self.dds[1] = 1.0
         if self.ds.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -221,30 +218,8 @@
         for g in self.grids:
             g._prepare_grid()
             g._setup_dx()
-        self._reconstruct_parent_child()
         self.max_level = self._handle.attrs["MaxLevel"]
 
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child.Parent.append(grid)
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
     def _chunk_io(self, dobj, cache = True, local_only = False):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in gobjs:


https://bitbucket.org/yt_analysis/yt/commits/b1a8f5add408/
Changeset:   b1a8f5add408
Branch:      yt
User:        jzuhone
Date:        2016-07-25 04:42:56+00:00
Summary:     Merge
Affected #:  1 file

diff -r fcee11d314565cba204d22e727bf3b4f1748c43a -r b1a8f5add408b58c609ec50cfb4bb6b9b32ca510 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -146,7 +146,7 @@
     def __init__(self, id, index, level):
         AMRGridPatch.__init__(self, id, filename = index.index_filename,
                               index = index)
-        self.Parent = []
+        self.Parent = None
         self.Children = []
         self.Level = level
 
@@ -154,12 +154,9 @@
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.ds.refine_by
-        else:
-            LE, RE = self.index.grid_left_edge[id,:], \
-                     self.index.grid_right_edge[id,:]
-            self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
+        LE, RE = self.index.grid_left_edge[id,:], \
+            self.index.grid_right_edge[id,:]
+        self.dds = self.ds.arr((RE-LE)/self.ActiveDimensions, "code_length")
         if self.ds.dimensionality < 2: self.dds[1] = 1.0
         if self.ds.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -221,30 +218,8 @@
         for g in self.grids:
             g._prepare_grid()
             g._setup_dx()
-        self._reconstruct_parent_child()
         self.max_level = self._handle.attrs["MaxLevel"]
 
-    def _reconstruct_parent_child(self):
-        mask = np.empty(len(self.grids), dtype='int32')
-        mylog.debug("First pass; identifying child grids")
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(self.grid_left_edge[i,:],
-                                self.grid_right_edge[i,:],
-                                self.grid_levels[i] + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
-        mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
-            for child in grid.Children:
-                child.Parent.append(grid)
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
     def _chunk_io(self, dobj, cache = True, local_only = False):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in gobjs:


https://bitbucket.org/yt_analysis/yt/commits/7adc91982647/
Changeset:   7adc91982647
Branch:      yt
User:        jzuhone
Date:        2016-07-25 04:58:02+00:00
Summary:     Fix temperature units
Affected #:  1 file

diff -r b1a8f5add408b58c609ec50cfb4bb6b9b32ca510 -r 7adc9198264778d09b0c34e461a7e5ef04892b6b yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -264,7 +264,8 @@
         """
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
-        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
+                          ("temperature", "K")]:
             # We set these to cgs for now, but they may be overridden later.
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
@@ -276,6 +277,9 @@
             self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                          (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
+        temp_unit = getattr(self, "temperature_unit", None)
+        if temp_unit is None:
+            self.temperature_unit = self.quan(1.0, "K")
         self.velocity_unit = self.length_unit / self.time_unit
 
     def _parse_parameter_file(self):

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

