[yt-svn] commit/yt: 12 new changesets

commits-noreply@bitbucket.org
Wed May 18 11:11:24 PDT 2016


12 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/f5773efbe337/
Changeset:   f5773efbe337
Branch:      yt
User:        jzuhone
Date:        2016-04-28 17:50:54+00:00
Summary:     Putting together a FLASHParticleDataset class
Affected #:  1 file

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r f5773efbe3374836c47765b28e6bf267efdecc8c yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -30,6 +30,9 @@
 from yt.utilities.physical_ratios import cm_per_mpc
 from .fields import FLASHFieldInfo
 
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
     #__slots__ = ["_level_id", "stop_index"]
@@ -237,8 +240,6 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         self.temperature_unit = self.quan(temperature_factor, "K")
-        # Still need to deal with:
-        #self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         
     def set_code_units(self):
@@ -423,3 +424,47 @@
 
     def close(self):
         self._handle.close()
+
+class FLASHParticleDataset(FLASHDataset):
+    _index_class = ParticleIndex
+    over_refine_factor = 1
+    filter_bbox = False
+
+    def __init__(self, filename, dataset_type='flash_particle_hdf5',
+                 storage_filename = None,
+                 units_override = None,
+                 unit_system = "cgs"):
+
+        if self._handle is not None: return
+        self._handle = HDF5FileHandler(filename)
+
+        self.refine_by = 2
+
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.storage_filename = storage_filename
+
+    def _parse_parameter_file(self):
+        # Let the superclass do all the work but then
+        # fix the domain dimensions
+        super(FLASHParticleDataset, self)._parse_parameter_file()
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.zeros(3, "int32")
+        self.domain_dimensions[:self.dimensionality] = nz
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = HDF5FileHandler(args[0])
+            if "bounding box" not in fileh["/"].keys() \
+                and "localnp" in fileh["/"].keys():
+                return True
+        except:
+            pass
+        return False
+
+    @classmethod
+    def _guess_candidates(cls, base, directories, files):
+        candidates = [_ for _ in files if "_hdf5_part_" in _]
+        # Typically, Flash won't have nested outputs.
+        return candidates, (len(candidates) == 0)
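
A quick aside on the _parse_parameter_file override above: the particle file carries no grid, so the
dataset's domain_dimensions are replaced with 1 << over_refine_factor along each active dimension.
A toy sketch of just that bit of logic (values are illustrative, not taken from a real file):

    import numpy as np

    over_refine_factor = 1
    dimensionality = 3                       # illustrative; read from the parameter file in practice
    nz = 1 << over_refine_factor             # 2**over_refine_factor == 2
    domain_dimensions = np.zeros(3, "int32")
    domain_dimensions[:dimensionality] = nz
    print(domain_dimensions)                 # -> [2 2 2]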


https://bitbucket.org/yt_analysis/yt/commits/456b892a977a/
Changeset:   456b892a977a
Branch:      yt
User:        jzuhone
Date:        2016-04-28 18:04:16+00:00
Summary:     Beginnings of some I/O for this as well
Affected #:  1 file

diff -r f5773efbe3374836c47765b28e6bf267efdecc8c -r 456b892a977a9ebad4888635242e12fd247b7a82 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -34,6 +34,16 @@
         seq = list(v[1] for v in g)
         yield seq
 
+def determine_particle_fields(handle):
+    try:
+        particle_fields = [s[0].decode("ascii","ignore").strip()
+                           for s in handle["/particle names"][:]]
+        _particle_fields = dict([("particle_" + s, i) for i, s in
+                                 enumerate(particle_fields)])
+    except KeyError:
+        _particle_fields = {}
+    return _particle_fields
+
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = False
     _dataset_type = "flash_hdf5"
@@ -43,15 +53,7 @@
         # Now we cache the particle fields
         self._handle = ds._handle
         self._particle_handle = ds._particle_handle
-        
-        try :
-            particle_fields = [s[0].decode("ascii","ignore").strip()
-                               for s in
-                               self._particle_handle["/particle names"][:]]
-            self._particle_fields = dict([("particle_" + s, i) for i, s in
-                                          enumerate(particle_fields)])
-        except KeyError:
-            self._particle_fields = {}
+        self._particle_fields = determine_particle_fields(self._particle_handle)
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -154,3 +156,15 @@
                     rv[g.id][field] = np.asarray(data[...,i], "=f8")
         return rv
 
+class IOHandlerFLASHParticle(BaseIOHandler):
+    _particle_reader = True
+    _dataset_type = "flash_particle_hdf5"
+
+    def __init__(self, ds):
+        super(IOHandlerFLASHParticle, self).__init__(ds)
+        # Now we cache the particle fields
+        self._handle = ds._handle
+        self._particle_fields = determine_particle_fields(self._handle)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
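
The refactored determine_particle_fields just builds a name-to-column map from the "/particle names"
table. A toy illustration of the mapping it produces (a plain numpy array stands in for the HDF5
dataset here; the field names are examples only):

    import numpy as np

    # Stand-in for handle["/particle names"]: one padded byte string per row.
    names = np.array([[b"posx "], [b"posy "], [b"posz "], [b"mass "]])
    particle_fields = [s[0].decode("ascii", "ignore").strip() for s in names[:]]
    _particle_fields = dict(("particle_" + s, i) for i, s in enumerate(particle_fields))
    print(_particle_fields)
    # {'particle_posx': 0, 'particle_posy': 1, 'particle_posz': 2, 'particle_mass': 3}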


https://bitbucket.org/yt_analysis/yt/commits/b7d9caadd5bf/
Changeset:   b7d9caadd5bf
Branch:      yt
User:        jzuhone
Date:        2016-05-03 03:26:51+00:00
Summary:     FLASH particles I/O and the rest of what we need to get this working.
Affected #:  2 files

diff -r 456b892a977a9ebad4888635242e12fd247b7a82 -r b7d9caadd5bfbda4dbc744eaa21caca3e7751452 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -24,7 +24,7 @@
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.data_objects.static_output import \
-    Dataset
+    Dataset, ParticleFile
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.physical_ratios import cm_per_mpc
@@ -425,21 +425,24 @@
     def close(self):
         self._handle.close()
 
+class FLASHParticleFile(ParticleFile):
+    pass
+
 class FLASHParticleDataset(FLASHDataset):
     _index_class = ParticleIndex
     over_refine_factor = 1
     filter_bbox = False
+    _file_class = FLASHParticleFile
 
     def __init__(self, filename, dataset_type='flash_particle_hdf5',
                  storage_filename = None,
                  units_override = None,
-                 unit_system = "cgs"):
+                 n_ref = 64, unit_system = "cgs"):
 
         if self._handle is not None: return
         self._handle = HDF5FileHandler(filename)
-
+        self.n_ref = n_ref
         self.refine_by = 2
-
         Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                          unit_system=unit_system)
         self.storage_filename = storage_filename
@@ -451,6 +454,8 @@
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.zeros(3, "int32")
         self.domain_dimensions[:self.dimensionality] = nz
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 456b892a977a9ebad4888635242e12fd247b7a82 -r b7d9caadd5bfbda4dbc744eaa21caca3e7751452 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -20,6 +20,8 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import AlwaysSelector
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def particle_sequences(grids):
@@ -165,6 +167,63 @@
         # Now we cache the particle fields
         self._handle = ds._handle
         self._particle_fields = determine_particle_fields(self._handle)
+        self._position_fields = [self._particle_fields["particle_pos%s" % ax]
+                                 for ax in 'xyz']
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        for data_file in sorted(data_files):
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                x = np.asarray(p_fields[:, px], dtype="=f8")
+                y = np.asarray(p_fields[:, py], dtype="=f8")
+                z = np.asarray(p_fields[:, pz], dtype="=f8")
+                yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        for data_file in sorted(data_files):
+            for ptype, field_list in sorted(ptf.items()):
+                x = np.asarray(p_fields[:, px], dtype="=f8")
+                y = np.asarray(p_fields[:, py], dtype="=f8")
+                z = np.asarray(p_fields[:, pz], dtype="=f8")
+                mask = selector.select_points(x, y, z, 0.0)
+                if mask is None: continue
+                for field in field_list:
+                    fi = self._particle_fields[field]
+                    data = p_fields[:, fi]
+                    yield (ptype, field), data[mask]
+
+    def _initialize_index(self, data_file, regions):
+        p_fields = self._handle["/tracer particles"]
+        morton = []
+        pos = np.column_stack(np.asarray(p_fields[:, i], dtype="=f8")
+                              for i in self._position_fields)
+        regions.add_data_file(pos, data_file.file_id)
+        morton.append(compute_morton(
+                      pos[:,0], pos[:,1], pos[:,2],
+                      data_file.ds.domain_left_edge,
+                      data_file.ds.domain_right_edge))
+        return np.concatenate(morton)
+
+    def _count_particles(self, data_file):
+        pcount = {"io": self._handle["/localnp"][:].sum()}
+        return pcount
+
+    def _identify_fields(self, data_file):
+        fields = [("io", field) for field in self._particle_fields]
+        return fields, {}
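
With the reads above in place, everything surfaced by _identify_fields comes through under the "io"
particle type and can be deposited onto the index like any other particle dataset. A minimal usage
sketch (the file name is only an example; see the docs and tests later in this series):

    import yt

    ds = yt.load("radio_halo_1kpc_hdf5_part_0100")   # example particle file name
    ad = ds.all_data()
    print(ad["io", "particle_posx"])                  # raw columns read by IOHandlerFLASHParticle
    print(ad["deposit", "all_density"])               # particles deposited onto the octree index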


https://bitbucket.org/yt_analysis/yt/commits/fed3cf49e1f6/
Changeset:   fed3cf49e1f6
Branch:      yt
User:        jzuhone
Date:        2016-05-03 03:37:38+00:00
Summary:     Simplifying things a little
Affected #:  1 file

diff -r b7d9caadd5bfbda4dbc744eaa21caca3e7751452 -r fed3cf49e1f6b74ac1581325bf99a66b2b460136 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -210,15 +210,13 @@
 
     def _initialize_index(self, data_file, regions):
         p_fields = self._handle["/tracer particles"]
-        morton = []
         pos = np.column_stack(np.asarray(p_fields[:, i], dtype="=f8")
                               for i in self._position_fields)
         regions.add_data_file(pos, data_file.file_id)
-        morton.append(compute_morton(
-                      pos[:,0], pos[:,1], pos[:,2],
-                      data_file.ds.domain_left_edge,
-                      data_file.ds.domain_right_edge))
-        return np.concatenate(morton)
+        morton = compute_morton(pos[:,0], pos[:,1], pos[:,2],
+                                data_file.ds.domain_left_edge,
+                                data_file.ds.domain_right_edge)
+        return morton
 
     def _count_particles(self, data_file):
         pcount = {"io": self._handle["/localnp"][:].sum()}


https://bitbucket.org/yt_analysis/yt/commits/16dfb292660c/
Changeset:   16dfb292660c
Branch:      yt
User:        jzuhone
Date:        2016-05-03 14:16:01+00:00
Summary:     API entries and doc information
Affected #:  2 files

diff -r fed3cf49e1f6b74ac1581325bf99a66b2b460136 -r 16dfb292660c9b4eddbf097efef5128abde02fb8 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -775,32 +775,31 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+a plot file, checkpoint file, or particle file. Particle files require special handling
+depending on the situation, the main issue being that they typically lack grid information. 
+The first case is when you have a plotfile and a particle file that you would like to 
+load together. For instance, if you were in a directory with the following files:
 
 .. code-block:: none
 
-   cosmoSim_coolhdf5_chk_0026
+   radio_halo_1kpc_hdf5_plt_cnt_0100
+   radio_halo_1kpc_hdf5_part_0100
 
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
+where the plotfile and the particle file were created at the same time (therefore having 
+particle data consistent with the grid structure of the former), its data may be loaded 
+with the ``particle_filename`` optional argument:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
+However, if you don't have a corresponding plotfile for a particle file but would still
+like to load the particle data, you can call ``yt.load`` on the particle file by itself.
+In that case the grid information will not be available, and the particle data will be
+loaded in a fashion similar to SPH data.
+
 .. rubric:: Caveats
 
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to

diff -r fed3cf49e1f6b74ac1581325bf99a66b2b460136 -r 16dfb292660c9b4eddbf097efef5128abde02fb8 yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -16,12 +16,14 @@
 from .data_structures import \
       FLASHGrid, \
       FLASHHierarchy, \
-      FLASHDataset
+      FLASHDataset, \
+      FLASHParticleDataset
 
 from .fields import \
       FLASHFieldInfo
 
 from .io import \
-      IOHandlerFLASH
+      IOHandlerFLASH, \
+      IOHandlerFLASHParticle
 
 from . import tests
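
With the api.py entries in place, the new classes are importable directly and yt.load dispatches to
them via _is_valid. A small sketch (the dataset path is the one used by the tests in the next
changeset):

    import yt
    from yt.frontends.flash.api import FLASHParticleDataset

    ds = yt.load("fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080")
    assert isinstance(ds, FLASHParticleDataset)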


https://bitbucket.org/yt_analysis/yt/commits/c5a3bbaa6315/
Changeset:   c5a3bbaa6315
Branch:      yt
User:        jzuhone
Date:        2016-05-03 17:46:32+00:00
Summary:     Tests for FLASHParticleDataset
Affected #:  1 file

diff -r 16dfb292660c9b4eddbf097efef5128abde02fb8 -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -20,8 +20,11 @@
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    data_dir_load
-from yt.frontends.flash.api import FLASHDataset
+    data_dir_load, \
+    sph_answer
+from yt.frontends.flash.api import FLASHDataset, \
+    FLASHParticleDataset
+from collections import OrderedDict
 
 _fields = ("temperature", "density", "velocity_magnitude")
 
@@ -45,7 +48,6 @@
         test_wind_tunnel.__name__ = test.description
         yield test
 
-
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
@@ -54,3 +56,28 @@
 def test_units_override():
     for test in units_override_check(sloshing):
         yield test
+
+fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
+
+fid_1to3_b1_fields = OrderedDict(
+    [
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "all_cic_velocity_x"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_y"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_z"), ("deposit", "all_cic")),
+    ]
+)
+
+
+@requires_file(fid_1to3_b1)
+def test_FLASHParticleDataset():
+    assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset)
+
+@requires_ds(fid_1to3_b1, big_data=True)
+def test_fid_1to3_b1():
+    ds = data_dir_load(fid_1to3_b1)
+    for test in sph_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields):
+        test_fid_1to3_b1.__name__ = test.description
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/2f68929d5015/
Changeset:   2f68929d5015
Branch:      yt
User:        jzuhone
Date:        2016-05-12 13:18:15+00:00
Summary:     Chunking for particle fields in FLASH
Affected #:  1 file

diff -r c5a3bbaa6315ba497fe6c3c015dd36da95618707 -r 2f68929d5015328e94ae59969a823c674bdd89d9 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -169,6 +169,7 @@
         self._particle_fields = determine_particle_fields(self._handle)
         self._position_fields = [self._particle_fields["particle_pos%s" % ax]
                                  for ax in 'xyz']
+        self._chunksize = 32**3
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
@@ -176,46 +177,71 @@
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
         data_files = set([])
+        assert(len(ptf) == 1)
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         px, py, pz = self._position_fields
         p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
         for data_file in sorted(data_files):
-            # This double-reads
+            pcount = self._count_particles(data_file)["io"]
             for ptype, field_list in sorted(ptf.items()):
-                x = np.asarray(p_fields[:, px], dtype="=f8")
-                y = np.asarray(p_fields[:, py], dtype="=f8")
-                z = np.asarray(p_fields[:, pz], dtype="=f8")
-                yield ptype, (x, y, z)
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    yield ptype, (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
         data_files = set([])
+        assert(len(ptf) == 1)
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         px, py, pz = self._position_fields
         p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
         for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
             for ptype, field_list in sorted(ptf.items()):
-                x = np.asarray(p_fields[:, px], dtype="=f8")
-                y = np.asarray(p_fields[:, py], dtype="=f8")
-                z = np.asarray(p_fields[:, pz], dtype="=f8")
-                mask = selector.select_points(x, y, z, 0.0)
-                if mask is None: continue
-                for field in field_list:
-                    fi = self._particle_fields[field]
-                    data = p_fields[:, fi]
-                    yield (ptype, field), data[mask]
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        fi = self._particle_fields[field]
+                        data = p_fields[total-count:total, fi]
+                        yield (ptype, field), data[mask]
 
     def _initialize_index(self, data_file, regions):
         p_fields = self._handle["/tracer particles"]
-        pos = np.column_stack(np.asarray(p_fields[:, i], dtype="=f8")
-                              for i in self._position_fields)
-        regions.add_data_file(pos, data_file.file_id)
-        morton = compute_morton(pos[:,0], pos[:,1], pos[:,2],
-                                data_file.ds.domain_left_edge,
-                                data_file.ds.domain_right_edge)
+        px, py, pz = self._position_fields
+        pcount = self._count_particles(data_file)["io"]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(self._chunksize, pcount - ind)
+            pos = np.empty((npart, 3), dtype="=f8")
+            pos[:,0] = p_fields[ind:ind+npart, px]
+            pos[:,1] = p_fields[ind:ind+npart, py]
+            pos[:,2] = p_fields[ind:ind+npart, pz]
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = \
+                compute_morton(pos[:,0], pos[:,1], pos[:,2],
+                               data_file.ds.domain_left_edge,
+                               data_file.ds.domain_right_edge)
+            ind += self._chunksize
         return morton
 
     def _count_particles(self, data_file):
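
The new reads all follow the same fixed-size chunking pattern: walk the "/tracer particles" table in
slabs of 32**3 rows instead of pulling the whole table into memory at once. The loop in isolation,
with a plain array standing in for the HDF5 dataset (a sketch, not part of the commit):

    import numpy as np

    chunksize = 32**3
    p_fields = np.random.random((100000, 4))   # stands in for handle["/tracer particles"]
    pcount = p_fields.shape[0]
    total = 0
    while total < pcount:
        count = min(chunksize, pcount - total)
        x = np.asarray(p_fields[total:total+count, 0], dtype="=f8")
        total += count
        # ... hand x (and y, z) to the selector, then yield the masked fields ...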


https://bitbucket.org/yt_analysis/yt/commits/51a451f8d15d/
Changeset:   51a451f8d15d
Branch:      yt
User:        jzuhone
Date:        2016-05-12 13:32:24+00:00
Summary:     Updating FLASH particle docs
Affected #:  1 file

diff -r 2f68929d5015328e94ae59969a823c674bdd89d9 -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -779,16 +779,23 @@
 a plot file, checkpoint file, or particle file. Particle files require special handling
 depending on the situation, the main issue being that they typically lack grid information. 
 The first case is when you have a plotfile and a particle file that you would like to 
-load together. For instance, if you were in a directory with the following files:
+load together. In the simplest case, this occurs automatically. For instance, if you
+were in a directory with the following files:
 
 .. code-block:: none
 
-   radio_halo_1kpc_hdf5_plt_cnt_0100
-   radio_halo_1kpc_hdf5_part_0100
+   radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
+   radio_halo_1kpc_hdf5_part_0100 # particle file
 
 where the plotfile and the particle file were created at the same time (therefore having 
-particle data consistent with the grid structure of the former), its data may be loaded 
-with the ``particle_filename`` optional argument:
+particle data consistent with the grid structure of the former). Notice also that the 
+prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
+the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
+This also works when loading a number of files in a time series.
+
+If the two files do not have the same prefix and number, but they nevertheless have the same
+grid structure and are at the same simulation time, the particle data may be loaded with the
+``particle_filename`` optional argument to ``yt.load``:
 
 .. code-block:: python
 

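For the automatic case described in the doc text above, no extra arguments are needed; the matching
particle file is picked up because its prefix and file number agree with the plotfile (file names as
in the docs):

    import yt

    # radio_halo_1kpc_hdf5_part_0100 is loaded alongside the plotfile automatically.
    ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100")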

https://bitbucket.org/yt_analysis/yt/commits/908968bc8e1c/
Changeset:   908968bc8e1c
Branch:      yt
User:        jzuhone
Date:        2016-05-12 14:13:01+00:00
Summary:     Merge
Affected #:  90 files

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in unit of solar metallicity.
+   Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },
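
The notebook edits above all move physical constants from yt.utilities.physical_constants to the
yt.units namespace. Pulling the individual snippets together into one sketch (assumes a yt version
where these imports and equivalencies exist):

    from yt.units import G, kboltz, mp
    from yt.units.yt_array import YTQuantity

    print(G.in_mks())                                         # Newton's constant in MKS
    print((kboltz * YTQuantity(300.0, "K")).in_units("erg"))  # k_B T at 300 K
    print(mp.to_equivalent("GeV", "mass_energy"))             # rest energy of the proton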

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -3,8 +3,8 @@
 from yt.analysis_modules.cosmological_observation.api import \
     LightRay
 
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-lr = LightRay(fn)
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+lr = LightRay(ds)
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
@@ -17,7 +17,6 @@
 
 # Optionally, we can now overplot this ray on a projection of the source
 # dataset
-ds = yt.load(fn)
 p = yt.ProjectionPlot(ds, 'z', 'density')
 p.annotate_ray(lr)
 p.save()

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -34,7 +34,8 @@
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
 To get started, make a new directory in ``yt/frontends`` with the name
-of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+of your code and add the name into ``yt/frontends/api.py``.
+Copying the contents of the ``yt/frontends/_skeleton``
 directory will add a lot of boilerplate for the required classes and
 methods that are needed.  In particular, you'll have to create a
 subclass of ``Dataset`` in the data_structures.py file. This subclass

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 
@@ -532,7 +538,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for OWLS frontend. 
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``), 
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing 
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1027,6 +1027,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essentials`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using `conda`.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OSX, or on a cluster that has a
+very out of date installation of OpenSSL. In both of these cases you will either
+need to install OpenSSL yourself from the system package manager or consider
+using ``INST_CONDA=1``, since conda-based installs can install the conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc)
+to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script to activate
+the yt environment in a terminal session. You must run this script in order to
+have yt properly recognized by your system.  You can either add it to your login
+script, or you must execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install latest development version of yt, you can download
 it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for development branch are built after every pull request is
 merged. In order to make sure you are running latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 Location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
+.. _conda-source-build:
 
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
-
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy `librockstar.so` into the `lib` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+`librockstar.so` is in your LD_LIBRARY_PATH:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+- ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
-- ``mercurial``
-
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
+  $ pip install numpy matplotlib cython sympy
 
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-.. code-block:: bash
-
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `This
+  StackOverflow discussion
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
   if you are curious why ``--prefix=`` is necessary on some systems.
 
-.. note::
-
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
-
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX). Please refer to
 the ``setuptools`` documentation for the additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your ``PATH``. Some Linux distributions do not include this directory in
+the default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting
+the output of the following command:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see
+:ref:`nightly-conda-builds`), or you can follow the instructions above to build
+yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,6 +34,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -55,7 +55,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
@@ -113,24 +113,23 @@
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("redshift0058")
-   dd = ds.sphere("max", (200, "kpc"))
-   rho = 5e-27
+    import yt
+    from yt.units import kpc
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    dd = ds.sphere(ds.domain_center, (500, "kpc"))
+    rho = 1e-28
 
-   bounds = [(dd.center[i] - 100.0/ds['kpc'],
-              dd.center[i] + 100.0/ds['kpc']) for i in range(3)]
+    bounds = [[dd.center[i] - 250*kpc, dd.center[i] + 250*kpc] for i in range(3)]
 
-   surf = ds.surface(dd, "density", rho)
+    surf = ds.surface(dd, "density", rho)
 
-   upload_id = surf.export_sketchfab(
-       title = "RD0058 - 5e-27",
-       description = "Extraction of Density (colored by Temperature) at 5e-27 " \
-                   + "g/cc from a galaxy formation simulation by Ryan Joung."
-       color_field = "temperature",
-       color_map = "hot",
-       color_log = True,
-       bounds = bounds
+    upload_id = surf.export_sketchfab(
+        title="galaxy0030 - 1e-28",
+        description="Extraction of Density (colored by temperature) at 1e-28 g/cc",
+        color_field="temperature",
+        color_map="hot",
+        color_log=True,
+        bounds=bounds
    )
 
 and yt will extract a surface, convert to a format that Sketchfab.com
@@ -141,15 +140,13 @@
 
 .. raw:: html
 
-   <iframe frameborder="0" height="480" width="854" allowFullScreen
-   webkitallowfullscreen="true" mozallowfullscreen="true"
-   src="http://skfb.ly/l4jh2edcba?autostart=0&transparent=0&autospin=0&controls=1&watermark=1"></iframe>
+     <iframe width="640" height="480" src="https://sketchfab.com/models/ff59dacd55824110ad5bcc292371a514/embed" frameborder="0" allowfullscreen mozallowfullscreen="true" webkitallowfullscreen="true" onmousewheel=""></iframe>
 
 As a note, Sketchfab has a maximum model size of 50MB for the free account.
-50MB is pretty hefty, though, so it shouldn't be a problem for most needs.
-We're working on a way to optionally upload links to the Sketchfab models on
-the `yt Hub <https://hub.yt-project.org/>`_, but for now, if you want to share
-a cool model we'd love to see it!
+50MB is pretty hefty, though, so it shouldn't be a problem for most
+needs. Additionally, if you have an eligible e-mail address associated with a
+school or university, you can request a free professional account, which allows
+models up to 200MB. See https://sketchfab.com/education for details.
 
 OBJ and MTL Files
 -----------------
@@ -167,7 +164,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'
@@ -239,7 +236,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 setup.py
--- a/setup.py
+++ b/setup.py
@@ -157,14 +157,21 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.primitives",
+              ["yt/utilities/lib/primitives.pyx"],
+              libraries=std_libs, 
+              depends=["yt/utilities/lib/primitives.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/element_mappings.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/primitives.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -275,20 +282,30 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd"]),
+                  depends=["yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/mesh_intersection.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/mesh_traversal.pxd"]),
         Extension("yt.utilities.lib.mesh_traversal",
                   ["yt/utilities/lib/mesh_traversal.pyx"],
                   depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd"]),
+                           "yt/utilities/lib/grid_traversal.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
         Extension("yt.utilities.lib.mesh_samplers",
                   ["yt/utilities/lib/mesh_samplers.pyx"],
                   depends=["yt/utilities/lib/mesh_samplers.pxd",
                            "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/primitives.pxd"]),
         Extension("yt.utilities.lib.mesh_intersection",
                   ["yt/utilities/lib/mesh_intersection.pyx"],
                   depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/primitives.pxd",
+                           "yt/utilities/lib/vec3_ops.pxd"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -1,10 +1,11 @@
 import os
 from pkg_resources import resource_filename
 import shutil
-import subprocess
+from subprocess import Popen, PIPE
 import sys
 import tempfile
 
+
 def check_for_openmp():
     """Returns True if local setup supports OpenMP, False otherwise"""
 
@@ -37,13 +38,21 @@
             "}"
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-fopenmp', filename],
-                                        stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-fopenmp', filename],
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+        
+        if exit_code != 0:
+            print("Compilation of OpenMP test code failed with the error: ")
+            print(err)
+            print("Disabling OpenMP support. ")
 
         # Clean up
         file.close()
     except OSError:
+        print("check_for_openmp() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
         return False
     finally:
         os.chdir(curdir)
@@ -82,12 +91,11 @@
         except IOError:
             rd = '/usr/local'
 
-    fail_msg = ("Pyembree is installed, but I could not compile Embree test code. \n"
-               "I attempted to find Embree headers in %s. \n"
+    fail_msg = ("I attempted to find Embree headers in %s. \n"
                "If this is not correct, please set your correct embree location \n"
                "using EMBREE_DIR environment variable or your embree.cfg file. \n"
                "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html "
-                "for more information." % rd)
+                "for more information. \n" % rd)
 
     # Create a temporary directory
     tmpdir = tempfile.mkdtemp()
@@ -110,23 +118,29 @@
             '}'
         )
         file.flush()
-        with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
-                             stdout=fnull, stderr=fnull)
+        p = Popen(compiler + ['-I%s/include/' % rd, filename], 
+                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        output, err = p.communicate()
+        exit_code = p.returncode
+
+        if exit_code != 0:
+            print("Pyembree is installed, but I could not compile Embree test code.")
+            print("The error message was: ")
+            print(err)
+            print(fail_msg)
 
         # Clean up
         file.close()
 
     except OSError:
-        print(fail_msg)
+        print("read_embree_location() could not find your C compiler. "
+              "Attempted to use '%s'. " % compiler)
+        return False
 
     finally:
         os.chdir(curdir)
         shutil.rmtree(tmpdir)
 
-    if exit_code != 0:
-        print(fail_msg)
-
     return rd
 
 

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,6 +20,9 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gamer_000:
+    - yt/frontends/gamer/tests/test_outputs.py
+
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
@@ -44,7 +47,7 @@
   local_tipsy_000:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_000:
+  local_varia_001:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -394,7 +394,8 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / 
+                               resolution) ).clip(0, np.inf) ) ).astype('int')
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
             # a note to the user about which lines components are unresolved

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -31,24 +31,19 @@
     parallel_objects, \
     parallel_root_only
 from yt.utilities.physical_constants import speed_of_light_cgs
+from yt.data_objects.static_output import Dataset
 
 class LightRay(CosmologySplice):
     """
-    LightRay(parameter_filename, simulation_type=None,
-             near_redshift=None, far_redshift=None,
-             use_minimum_datasets=True, deltaz_min=0.0,
-             minimum_coherent_box_fraction=0.0,
-             time_data=True, redshift_data=True,
-             find_outputs=False, load_kwargs=None):
-
-    Create a LightRay object.  A light ray is much like a light cone,
-    in that it stacks together multiple datasets in order to extend a
-    redshift interval.  Unlike a light cone, which does randomly
-    oriented projections for each dataset, a light ray consists of
-    randomly oriented single rays.  The purpose of these is to create
-    synthetic QSO lines of sight.
-
-    Light rays can also be made from single datasets.
+    A LightRay object is a one-dimensional object representing the trajectory
+    of a ray of light as it passes through one or more datasets (simple and
+    compound rays, respectively).  One can sample any of the fields intersected
+    by the LightRay object as it passes through the dataset(s).
+    
+    For compound rays, the LightRay stacks together multiple datasets in a time
+    series in order to approximate a LightRay's path through a volume
+    and redshift interval larger than a single simulation data output.
+    The outcome is something akin to a synthetic QSO line of sight.
 
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
@@ -56,31 +51,32 @@
 
     Parameters
     ----------
-    parameter_filename : string
-        The path to the simulation parameter file or dataset.
+    parameter_filename : string or :class:`yt.data_objects.static_output.Dataset`
+        For simple rays, one may pass either a loaded dataset object or
+        the filename of a dataset.
+        For compound rays, one must pass the filename of the simulation
+        parameter file.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to
-        refer to a single dataset.
+        This refers to the simulation frontend type.  Do not use for simple 
+        rays.
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use if making a light ray from a single
-        dataset.
+        datasets.  Do not use for simple rays.
         Default: None
     use_minimum_datasets : optional, bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
         will contain as many entries as possible within the redshift
-        interval.
+        interval.  Do not use for simple rays.
         Default: True.
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
-        datasets in the returned list.
+        datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
         Used with use_minimum_datasets set to False, this parameter
@@ -88,23 +84,26 @@
         before rerandomizing the projection axis and center.  This
         was invented to allow light rays with thin slices to sample
         coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.
+        so well.  Try setting this parameter to 1 and see what happens.  
+        Do not use for simple rays.
         Default: 0.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     redshift_data : optional, bool
         Whether or not to include redshift outputs when gathering
-        datasets for time series.
+        datasets for time series.  Do not use for simple rays.
         Default: True.
     find_outputs : optional, bool
         Whether or not to search for datasets in the current
-        directory.
+        directory.  Do not use for simple rays.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load"
-        function, appropriate for use of certain frontends.  E.g.
+        If you are passing the filename of a dataset to LightRay rather than an
+        already loaded dataset, then you can optionally provide this dictionary
+        of keyword arguments, which is passed to yt's "load" function when the
+        dataset is loaded.  Necessary for use with certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
         Default : None
@@ -130,26 +129,37 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.
-        if simulation_type is None:
+        # The options here are:
+        # 1) User passed us a dataset: use it to make a simple ray
+        # 2) User passed us a dataset filename: use it to make a simple ray
+        # 3) User passed us a simulation filename: use it to make a compound ray
+
+        # Make a light ray from a single, given dataset: #1, #2
+        if simulation_type is None:     
             self.simulation_type = simulation_type
-            ds = load(parameter_filename, **self.load_kwargs)
-            if ds.cosmological_simulation:
-                redshift = ds.current_redshift
+            if isinstance(self.parameter_filename, Dataset):
+                self.ds = self.parameter_filename
+                self.parameter_filename = self.ds.basename
+            elif isinstance(self.parameter_filename, str):
+                self.ds = load(self.parameter_filename, **self.load_kwargs)
+            if self.ds.cosmological_simulation:
+                redshift = self.ds.current_redshift
                 self.cosmology = Cosmology(
-                    hubble_constant=ds.hubble_constant,
-                    omega_matter=ds.omega_matter,
-                    omega_lambda=ds.omega_lambda,
-                    unit_registry=ds.unit_registry)
+                    hubble_constant=self.ds.hubble_constant,
+                    omega_matter=self.ds.omega_matter,
+                    omega_lambda=self.ds.omega_lambda,
+                    unit_registry=self.ds.unit_registry)
             else:
                 redshift = 0.
-            self.light_ray_solution.append({"filename": parameter_filename,
+            self.light_ray_solution.append({"filename": self.parameter_filename,
                                             "redshift": redshift})
 
-        # Make a light ray from a simulation time-series.
+        # Make a light ray from a simulation time-series. #3
         else:
+            self.ds = None
+            assert isinstance(self.parameter_filename, str)
             # Get list of datasets for light ray solution.
-            CosmologySplice.__init__(self, parameter_filename, simulation_type,
+            CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
               self.create_cosmology_splice(self.near_redshift, self.far_redshift,
@@ -270,7 +280,7 @@
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
         the ray and the path length of the ray through those cells.
-        Light ray data can be written out to an hdf5 file.
+        Light ray data must be written out to an hdf5 file.
 
         Parameters
         ----------
@@ -383,8 +393,12 @@
                                                        storage=all_ray_storage,
                                                        njobs=njobs):
 
-            # Load dataset for segment.
-            ds = load(my_segment['filename'], **self.load_kwargs)
+            # In case of simple rays, use the already loaded dataset: self.ds, 
+            # otherwise, load dataset for segment.
+            if self.ds is None:
+                ds = load(my_segment['filename'], **self.load_kwargs)
+            else:
+                ds = self.ds
 
             my_segment['unique_identifier'] = ds.unique_identifier
             if redshift is not None:
@@ -555,7 +569,7 @@
         Write light ray data to hdf5 file.
         """
         if self.simulation_type is None:
-            ds = load(self.parameter_filename, **self.load_kwargs)
+            ds = self.ds
         else:
             ds = {}
             ds["dimensionality"] = self.simulation.dimensionality

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- /dev/null
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -0,0 +1,92 @@
+"""
+Unit test for the light_ray analysis module
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    requires_file
+from yt.analysis_modules.cosmological_observation.api import LightRay
+import os
+import shutil
+from yt.utilities.answer_testing.framework import data_dir_load
+import tempfile
+
+COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo():
+    """
+    This test generates a cosmological light ray
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo():
+    """
+    This test generates a non-cosmological light ray
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_light_ray_non_cosmo_from_dataset():
+    """
+    This test generates a non-cosmological light ray created from an already
+    loaded dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(COSMO_PLUS_SINGLE)
+    lr = LightRay(ds)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -176,7 +176,7 @@
     constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
-        to use this.
+        to use this. It should be given in units of solar metallicity.
         Default: None.
 
     This will create three fields:
@@ -245,7 +245,7 @@
 
     emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
@@ -253,7 +253,7 @@
 
     lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/s")
 
     def _photon_emissivity_field(field, data):
@@ -273,7 +273,7 @@
 
     phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="photons/cm**3/s")
 
     return emiss_name, lum_name, phot_name

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -65,6 +65,7 @@
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
     default_colormap = 'arbre',
+    ray_tracing_engine = 'embree',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
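
The ``ray_tracing_engine`` entry above only adds a default value; like the
other entries in this block it can be overridden in the ``[yt]`` section of the
user's configuration file and read back through ``ytcfg``. A minimal sketch of
how that lookup would go:

.. code-block:: python

    from yt.config import ytcfg

    # Returns 'embree' unless the user's yt configuration overrides it.
    engine = ytcfg.get("yt", "ray_tracing_engine")
    print(engine)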

diff -r 51a451f8d15deb4d30a41a086ee9aec19cdcc535 -r 908968bc8e1cd9dc82e46ca19151345207f04a64 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -597,7 +597,8 @@
             ftype = self._last_freq[0] or ftype
         field = (ftype, fname)
         if field == self._last_freq:
-            return self._last_finfo
+            if field not in self.field_info.field_aliases.values():
+                return self._last_finfo
         if field in self.field_info:
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/298ae0814635/
Changeset:   298ae0814635
Branch:      yt
User:        jzuhone
Date:        2016-05-12 14:44:31+00:00
Summary:     Bump answer tests
Affected #:  1 file

diff -r 908968bc8e1cd9dc82e46ca19151345207f04a64 -r 298ae0814635e2ff1c41ca39c0e71378e0fa5b14 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -14,7 +14,7 @@
   local_fits_000:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_000:
+  local_flash_001:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_000:


https://bitbucket.org/yt_analysis/yt/commits/07db33e310f5/
Changeset:   07db33e310f5
Branch:      yt
User:        jzuhone
Date:        2016-05-13 14:54:16+00:00
Summary:     Alphabetize imports and catch IOError
Affected #:  1 file

diff -r 298ae0814635e2ff1c41ca39c0e71378e0fa5b14 -r 07db33e310f550249c313f33d1f0b01d0f56c5e3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -18,21 +18,20 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.funcs import mylog
 from yt.geometry.grid_geometry_handler import \
     GridIndex
-from yt.data_objects.static_output import \
-    Dataset, ParticleFile
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.physical_ratios import cm_per_mpc
 from .fields import FLASHFieldInfo
 
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
-
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
     #__slots__ = ["_level_id", "stop_index"]
@@ -478,7 +477,7 @@
             if "bounding box" not in fileh["/"].keys() \
                 and "localnp" in fileh["/"].keys():
                 return True
-        except:
+        except IOError:
             pass
         return False
 


https://bitbucket.org/yt_analysis/yt/commits/506d890f8554/
Changeset:   506d890f8554
Branch:      yt
User:        ngoldbaum
Date:        2016-05-18 18:11:15+00:00
Summary:     Merged in jzuhone/yt (pull request #2157)

FLASHParticleDataset
Affected #:  6 files

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -775,32 +775,38 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+a plot file, checkpoint file, or particle file. Particle files require special handling
+depending on the situation, the main issue being that they typically lack grid information. 
+The first case is when you have a plotfile and a particle file that you would like to 
+load together. In the simplest case, this occurs automatically. For instance, if you
+were in a directory with the following files:
 
 .. code-block:: none
 
-   cosmoSim_coolhdf5_chk_0026
+   radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
+   radio_halo_1kpc_hdf5_part_0100 # particle file
 
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
+where the plotfile and the particle file were created at the same time (therefore having 
+particle data consistent with the grid structure of the former). Notice also that the 
+prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
+the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
+This also works when loading a number of files in a time series.
 
-.. code-block:: python
-
-   import yt
-   ds = yt.load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
+If the two files do not have the same prefix and number, but they nevertheless have the same
+grid structure and are at the same simulation time, the particle data may be loaded with the
+``particle_filename`` optional argument to ``yt.load``:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
+However, if you don't have a corresponding plotfile for a particle file, but would still
+like to load the particle data, you can call ``yt.load`` on the file directly. In this
+case, the grid information will not be available, and the particle data will be loaded
+in a fashion similar to SPH data.
+
 .. rubric:: Caveats
 
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
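
The three loading paths described in the updated documentation above reduce to
the following calls. This is only an illustrative sketch; the
``radio_halo_1kpc`` filenames are the example names used in the docs and are
assumed to exist on disk:

.. code-block:: python

    import yt

    # Plotfile with a matching particle file (same prefix and number):
    # the particle data is picked up automatically.
    ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100")

    # Same grid structure and simulation time but mismatched names:
    # point yt at the particle file explicitly.
    ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100",
                 particle_filename="radio_halo_1kpc_hdf5_part_0100")

    # Particle file on its own: no grid information is available, and the
    # particle data is loaded in a fashion similar to SPH data.
    ds = yt.load("radio_halo_1kpc_hdf5_part_0100")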

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -14,7 +14,7 @@
   local_fits_000:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_000:
+  local_flash_001:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_000:

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -16,12 +16,14 @@
 from .data_structures import \
       FLASHGrid, \
       FLASHHierarchy, \
-      FLASHDataset
+      FLASHDataset, \
+      FLASHParticleDataset
 
 from .fields import \
       FLASHFieldInfo
 
 from .io import \
-      IOHandlerFLASH
+      IOHandlerFLASH, \
+      IOHandlerFLASHParticle
 
 from . import tests

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -18,13 +18,15 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.funcs import mylog
 from yt.geometry.grid_geometry_handler import \
     GridIndex
-from yt.data_objects.static_output import \
-    Dataset
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.physical_ratios import cm_per_mpc
@@ -251,8 +253,6 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         self.temperature_unit = self.quan(temperature_factor, "K")
-        # Still need to deal with:
-        #self.conversion_factors['temp'] = (1.0 + self.current_redshift)**-2.0
         self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         
     def set_code_units(self):
@@ -437,3 +437,52 @@
 
     def close(self):
         self._handle.close()
+
+class FLASHParticleFile(ParticleFile):
+    pass
+
+class FLASHParticleDataset(FLASHDataset):
+    _index_class = ParticleIndex
+    over_refine_factor = 1
+    filter_bbox = False
+    _file_class = FLASHParticleFile
+
+    def __init__(self, filename, dataset_type='flash_particle_hdf5',
+                 storage_filename = None,
+                 units_override = None,
+                 n_ref = 64, unit_system = "cgs"):
+
+        if self._handle is not None: return
+        self._handle = HDF5FileHandler(filename)
+        self.n_ref = n_ref
+        self.refine_by = 2
+        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
+                         unit_system=unit_system)
+        self.storage_filename = storage_filename
+
+    def _parse_parameter_file(self):
+        # Let the superclass do all the work but then
+        # fix the domain dimensions
+        super(FLASHParticleDataset, self)._parse_parameter_file()
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.zeros(3, "int32")
+        self.domain_dimensions[:self.dimensionality] = nz
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = HDF5FileHandler(args[0])
+            if "bounding box" not in fileh["/"].keys() \
+                and "localnp" in fileh["/"].keys():
+                return True
+        except IOError:
+            pass
+        return False
+
+    @classmethod
+    def _guess_candidates(cls, base, directories, files):
+        candidates = [_ for _ in files if "_hdf5_part_" in _]
+        # Typically, Flash won't have nested outputs.
+        return candidates, (len(candidates) == 0)
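
The ``_is_valid`` check above keys off two HDF5 entries: a FLASH particle file
carries ``localnp`` but lacks the ``bounding box`` dataset found in plot and
checkpoint files. If you want to verify a file by hand, the equivalent
inspection with ``h5py`` looks roughly like this (the filename is only an
illustration):

.. code-block:: python

    import h5py

    # Mirrors FLASHParticleDataset._is_valid: particle files have "localnp"
    # but no "bounding box" at the root of the HDF5 file.
    with h5py.File("radio_halo_1kpc_hdf5_part_0100", "r") as f:
        looks_like_particle_file = ("bounding box" not in f) and ("localnp" in f)
    print(looks_like_particle_file)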

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -20,6 +20,8 @@
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import AlwaysSelector
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
 
 # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list
 def particle_sequences(grids):
@@ -34,6 +36,16 @@
         seq = list(v[1] for v in g)
         yield seq
 
+def determine_particle_fields(handle):
+    try:
+        particle_fields = [s[0].decode("ascii","ignore").strip()
+                           for s in handle["/particle names"][:]]
+        _particle_fields = dict([("particle_" + s, i) for i, s in
+                                 enumerate(particle_fields)])
+    except KeyError:
+        _particle_fields = {}
+    return _particle_fields
+
 class IOHandlerFLASH(BaseIOHandler):
     _particle_reader = False
     _dataset_type = "flash_hdf5"
@@ -43,15 +55,7 @@
         # Now we cache the particle fields
         self._handle = ds._handle
         self._particle_handle = ds._particle_handle
-        
-        try :
-            particle_fields = [s[0].decode("ascii","ignore").strip()
-                               for s in
-                               self._particle_handle["/particle names"][:]]
-            self._particle_fields = dict([("particle_" + s, i) for i, s in
-                                          enumerate(particle_fields)])
-        except KeyError:
-            self._particle_fields = {}
+        self._particle_fields = determine_particle_fields(self._particle_handle)
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -154,3 +158,96 @@
                     rv[g.id][field] = np.asarray(data[...,i], "=f8")
         return rv
 
+class IOHandlerFLASHParticle(BaseIOHandler):
+    _particle_reader = True
+    _dataset_type = "flash_particle_hdf5"
+
+    def __init__(self, ds):
+        super(IOHandlerFLASHParticle, self).__init__(ds)
+        # Now we cache the particle fields
+        self._handle = ds._handle
+        self._particle_fields = determine_particle_fields(self._handle)
+        self._position_fields = [self._particle_fields["particle_pos%s" % ax]
+                                 for ax in 'xyz']
+        self._chunksize = 32**3
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        px, py, pz = self._position_fields
+        p_fields = self._handle["/tracer particles"]
+        assert(len(data_files) == 1)
+        for data_file in sorted(data_files):
+            pcount = self._count_particles(data_file)["io"]
+            for ptype, field_list in sorted(ptf.items()):
+                total = 0
+                while total < pcount:
+                    count = min(self._chunksize, pcount - total)
+                    x = np.asarray(p_fields[total:total+count, px], dtype="=f8")
+                    y = np.asarray(p_fields[total:total+count, py], dtype="=f8")
+                    z = np.asarray(p_fields[total:total+count, pz], dtype="=f8")
+                    total += count
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        fi = self._particle_fields[field]
+                        data = p_fields[total-count:total, fi]
+                        yield (ptype, field), data[mask]
+
+    def _initialize_index(self, data_file, regions):
+        p_fields = self._handle["/tracer particles"]
+        px, py, pz = self._position_fields
+        pcount = self._count_particles(data_file)["io"]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(self._chunksize, pcount - ind)
+            pos = np.empty((npart, 3), dtype="=f8")
+            pos[:,0] = p_fields[ind:ind+npart, px]
+            pos[:,1] = p_fields[ind:ind+npart, py]
+            pos[:,2] = p_fields[ind:ind+npart, pz]
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = \
+                compute_morton(pos[:,0], pos[:,1], pos[:,2],
+                               data_file.ds.domain_left_edge,
+                               data_file.ds.domain_right_edge)
+            ind += self._chunksize
+        return morton
+
+    def _count_particles(self, data_file):
+        pcount = {"io": self._handle["/localnp"][:].sum()}
+        return pcount
+
+    def _identify_fields(self, data_file):
+        fields = [("io", field) for field in self._particle_fields]
+        return fields, {}

diff -r f562b7d66268e013dbd227ce912cba0137f9ef2f -r 506d890f8554f9d7c95480410f8add32981b7e7f yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -20,8 +20,11 @@
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    data_dir_load
-from yt.frontends.flash.api import FLASHDataset
+    data_dir_load, \
+    sph_answer
+from yt.frontends.flash.api import FLASHDataset, \
+    FLASHParticleDataset
+from collections import OrderedDict
 
 _fields = ("temperature", "density", "velocity_magnitude")
 
@@ -45,7 +48,6 @@
         test_wind_tunnel.__name__ = test.description
         yield test
 
-
 @requires_file(wt)
 def test_FLASHDataset():
     assert isinstance(data_dir_load(wt), FLASHDataset)
@@ -54,3 +56,28 @@
 def test_units_override():
     for test in units_override_check(sloshing):
         yield test
+
+fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080"
+
+fid_1to3_b1_fields = OrderedDict(
+    [
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "all_cic_velocity_x"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_y"), ("deposit", "all_cic")),
+        (("deposit", "all_cic_velocity_z"), ("deposit", "all_cic")),
+    ]
+)
+
+
+@requires_file(fid_1to3_b1)
+def test_FLASHParticleDataset():
+    assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset)
+
+@requires_ds(fid_1to3_b1, big_data=True)
+def test_fid_1to3_b1():
+    ds = data_dir_load(fid_1to3_b1)
+    for test in sph_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields):
+        test_fid_1to3_b1.__name__ = test.description
+        yield test
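
Putting the pieces of this pull request together, the new frontend is exercised
much as in the answer test above. A hedged sketch, assuming the
``fiducial_1to3_b1`` test dataset is available locally:

.. code-block:: python

    import yt
    from yt.frontends.flash.api import FLASHParticleDataset

    ds = yt.load("fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080")
    assert isinstance(ds, FLASHParticleDataset)

    ad = ds.all_data()
    # Raw particle positions come straight from the particle file...
    print(ad["io", "particle_posx"])
    # ...while deposited fields such as ("deposit", "all_density") are built
    # on top of them, as covered by the new answer tests.
    print(ad["deposit", "all_density"])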

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.