[yt-svn] commit/yt: 12 new changesets

commits-noreply@bitbucket.org
Wed May 11 11:32:21 PDT 2016


12 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/54edcb134b37/
Changeset:   54edcb134b37
Branch:      yt
User:        hyschive
Date:        2016-04-28 15:54:24+00:00
Summary:     GAMER frontend
Affected #:  10 files

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'flash',
     'gadget',
     'gadget_fof',
+    'gamer',
     'gdf',
     'halo_catalog',
     'http_stream',

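Registering 'gamer' here is what lets yt pick up the new frontend: the list above drives the frontend imports, after which the new classes are reachable in the usual way. A one-line sketch (class names come from the new gamer/api.py added below):

    from yt.frontends.gamer.api import GAMERDataset, GAMERHierarchy, IOHandlerGAMER
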
diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/__init__.py
--- /dev/null
+++ b/yt/frontends/gamer/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.gamer
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/api.py
--- /dev/null
+++ b/yt/frontends/gamer/api.py
@@ -0,0 +1,28 @@
+"""
+API for yt.frontends.gamer
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      GAMERGrid, \
+      GAMERHierarchy, \
+      GAMERDataset
+
+from .fields import \
+      GAMERFieldInfo
+
+from .io import \
+      IOHandlerGAMER
+
+### NOT SUPPORTED YET
+#from . import tests

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/data_structures.py
--- /dev/null
+++ b/yt/frontends/gamer/data_structures.py
@@ -0,0 +1,303 @@
+"""
+GAMER-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import mylog
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.data_objects.static_output import \
+    Dataset
+from yt.utilities.file_handler import \
+    HDF5FileHandler
+from yt.utilities.physical_ratios import cm_per_mpc
+from .fields import GAMERFieldInfo
+from yt.testing import assert_equal
+
+
+
+class GAMERGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, id, index, level):
+        AMRGridPatch.__init__(self, id,
+                              filename = index.index_filename,
+                              index    = index)
+        self.Parent   = None    # do NOT initialize Parent as []
+        self.Children = []
+        self.Level    = level
+        self.PID      = None    # patch ID adopted in GAMER
+        self.CID      = None    # cluster ID in the GAMER HDF5 output
+
+    def __repr__(self):
+        return 'GAMERGrid_%09i (dimension = %s)' % (self.id, self.ActiveDimensions)
+
+
+class GAMERHierarchy(GridIndex):
+    grid = GAMERGrid
+    
+    def __init__(self, ds, dataset_type = 'gamer'):
+        self.dataset_type     = dataset_type
+        self.dataset          = weakref.proxy(ds)
+        self.index_filename   = self.dataset.parameter_filename
+        self.directory        = os.path.dirname(self.index_filename)
+        self._handle          = ds._handle
+        self.float_type       = 'float64' # fixed even when FLOAT8 is off
+#       self._particle_handle = ds._particle_handle
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _detect_output_fields(self):
+        # find all field names in the current dataset
+        # choose an arbitrary grid since they all have the same fields
+        Grid = self._handle['Level_00']['Cluster_%09i'%0]['Patch_%09i'%0]
+        self.field_list = [ ('gamer', v) for v in Grid.dtype.names ]
+    
+    def _count_grids(self):
+        # count the total number of patches at all levels  
+        self.num_grids = self.dataset.parameters['NPatch'].sum()
+        
+    def _parse_index(self):
+        f    = self._handle
+        Para = self.dataset.parameters
+        GID0 = 0
+
+        self.grid_dimensions    [:] = Para['PatchSize']
+        self.grid_particle_count[:] = 0
+
+        for lv in range(0, Para['NLevel']):
+            NP = Para['NPatch'][lv]
+            if NP == 0: break
+
+            Scale  = Para['CellScale'][lv]
+            PScale = Para['PatchSize']*Scale
+            CrList = f['PatchMap'][ 'Level_%d%d'%(lv/10,lv%10) ]['CornerList'].value
+            Cr2Phy = f['PatchMap'][ 'Level_%d%d'%(lv/10,lv%10) ]['CornerList'].attrs['Cvt2Phy']
+
+            # set the level and edge of each grid
+            # (left/right_edge are YT arrays in code units)
+            self.grid_levels.flat[ GID0:GID0+NP ] = lv
+            self.grid_left_edge  [ GID0:GID0+NP ] = CrList[:]*Cr2Phy
+            self.grid_right_edge [ GID0:GID0+NP ] = (CrList[:] + PScale)*Cr2Phy
+
+            GID0 += NP
+
+        # allocate all grid objects
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in range(self.num_grids):
+            self.grids[i] = self.grid(i, self, self.grid_levels.flat[i])
+
+        # maximum level with patches (which can be lower than MAX_LEVEL)
+        self.max_level = self.grid_levels.max()
+        
+    def _populate_grid_objects(self):
+        f  = self._handle
+        NP = self.dataset.parameters['NPatch']
+        NC = f['SimuInfo']['KeyInfo']['H5_MaxDsetPerGroup']
+
+        for lv in range(0, self.dataset.parameters['NLevel']):
+            if NP[lv] == 0: break
+
+            # converting HDF5 dataset to numpy array in advance (using .value)
+            # is much faster than using h5py directly
+            SonList    = f['PatchMap'][ 'Level_%02i'%lv ]['SonList'].value
+#           SonList    = f['PatchMap'][ 'Level_%02i'%lv ]['SonList']
+            SonGID0    = NP[0:lv+1].sum()
+            GID0       = NP[0:lv  ].sum()
+
+            # set the parent-children relationship
+            for PID in range(0, NP[lv]):
+                Grid   = self.grids.flat[GID0+PID]
+                CID    = PID / NC
+                SonPID = SonList[PID]
+
+                if SonPID >= 0:
+                    Grid.Children = [ self.grids.flat[SonGID0+SonPID+s] \
+                                      for s in range(0,8) ]
+
+                for SonGrid in Grid.Children: SonGrid.Parent = Grid
+
+                # record the patch and cluster indices
+                Grid.PID = PID
+                Grid.CID = CID
+
+                # set up other grid attributes
+                Grid._prepare_grid()
+                Grid._setup_dx()
+
+        # validate the parent-children relationship in the debug mode
+        if self.dataset._debug:
+            self._validate_parent_children_relationship()
+
+    # for _debug mode only
+    def _validate_parent_children_relationship(self):
+        mylog.info('Validating the parent-children relationship ...')
+
+        f    = self._handle
+        Para = self.dataset.parameters
+
+        for Grid in self.grids:
+            # parent->children == itself
+            if Grid.Parent is not None:
+                assert Grid.Parent.Children[0+Grid.id%8] is Grid, \
+                       'Grid %d, Parent %d, Parent->Children %d' % \
+                       (Grid.id, Grid.Parent.id, Grid.Parent.Children[0].id)
+
+            # children->parent == itself
+            for c in Grid.Children:
+                assert c.Parent is Grid, \
+                       'Grid %d, Children %d, Children->Parent %d' % \
+                       (Grid.id, c.id, c.Parent.id)
+
+            # all refinement grids should have parent 
+            if Grid.Level > 0:
+                assert Grid.Parent is not None and Grid.Parent.id >= 0, \
+                       'Grid %d, Level %d, Parent %d' % \
+                       (Grid.id, Grid.Level, \
+                        Grid.Parent.id if Grid.Parent is not None else -999)
+
+            # parent index is consistent with the loaded dataset
+            if Grid.Level > 0:
+                NC     = f['SimuInfo']['KeyInfo']['H5_MaxDsetPerGroup']
+                PID    = Grid.PID
+                CID    = PID / NC
+                LvName = 'Level_%02i'   % Grid.Level
+                CName  = 'Cluster_%09i' % CID
+                PName  = 'Patch_%09i'   % PID
+                FaGID  = f[LvName][CName][PName].attrs['Info']['Father'] \
+                         + Para['NPatch'][0:Grid.Level-1].sum()
+                assert FaGID == Grid.Parent.id, \
+                       'Grid %d, Level %d, Parent_Found %d, Parent_Expect %d'%\
+                       (Grid.id, Grid.Level, Grid.Parent.id, FaGID)
+
+            # edges between children and parent
+            if len(Grid.Children) > 0:
+                assert_equal(Grid.LeftEdge,  Grid.Children[0].LeftEdge )
+                assert_equal(Grid.RightEdge, Grid.Children[7].RightEdge)
+        mylog.info('Check passed')
+               
+
+class GAMERDataset(Dataset):
+    _index_class      = GAMERHierarchy
+    _field_info_class = GAMERFieldInfo
+    _handle           = None
+#   _debug            = True  # turn on the debug mode
+    _debug            = False # turn on the debug mode
+    
+    def __init__(self, filename,
+                 dataset_type      = 'gamer',
+                 storage_filename  = None,
+                 particle_filename = None, 
+                 units_override    = None,
+                 unit_system       = "cgs"):
+
+        if self._handle is not None: return
+
+        self.fluid_types      += ('gamer',)
+        self._handle           = HDF5FileHandler(filename)
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None:
+            self._particle_handle = self._handle
+        else:
+            try:
+                self._particle_handle = HDF5FileHandler(self.particle_filename)
+            except:
+                raise IOError(self.particle_filename)
+
+        # currently GAMER only supports refinement by a factor of 2
+        self.refine_by = 2
+
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override = units_override,
+                         unit_system    = unit_system)
+        self.storage_filename = storage_filename
+        
+    def _set_code_unit_attributes(self):
+        # GAMER does not assume any unit yet ...
+        if len(self.units_override) == 0:
+            mylog.warning("GAMER does not assume any unit ==> " +
+                          "Use units_override to specify the units")
+
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            setattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
+
+            if len(self.units_override) == 0:
+                mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+        
+    def _parse_parameter_file(self):
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # shortcuts for different simulation information
+        KeyInfo   = self._handle['SimuInfo']['KeyInfo']
+        InputPara = self._handle['SimuInfo']['InputPara']
+        Makefile  = self._handle['SimuInfo']['Makefile']
+        SymConst  = self._handle['SimuInfo']['SymConst']
+
+        # simulation time and domain
+        self.current_time      = KeyInfo['Time'][0]
+        self.dimensionality    = 3  # always 3D
+        self.domain_left_edge  = np.array([0.,0.,0.], dtype='float64')
+        self.domain_right_edge = KeyInfo['BoxSize'].astype('float64')
+        self.domain_dimensions = KeyInfo['NX0'].astype('int64')
+
+        # periodicity
+        periodic = InputPara['Opt__BC_Flu'][0] == 0
+        self.periodicity = (periodic,periodic,periodic)
+
+        # cosmological parameters
+        if Makefile['Comoving']:
+            self.cosmological_simulation = 1
+            self.current_redshift        = 1.0/self.current_time - 1.0
+            self.omega_matter            = InputPara['OmegaM0'] 
+            self.omega_lambda            = 1.0 - self.omega_matter
+            self.hubble_constant         = 0.6955   # H0 is not set in GAMER
+        else:
+            self.cosmological_simulation = 0
+            self.current_redshift        = 0.0
+            self.omega_matter            = 0.0
+            self.omega_lambda            = 0.0
+            self.hubble_constant         = 0.0
+
+        # code-specific parameters
+        for t in KeyInfo, InputPara, Makefile, SymConst:
+            for v in t.dtype.names: self.parameters[v] = t[v]
+
+        # reset 'Model' to be more readable
+        self.parameters['Model'] =      'Hydro' if KeyInfo['Model'] == 1 \
+                                   else 'MHD'   if KeyInfo['Model'] == 2 \
+                                   else 'ELBDM' if KeyInfo['Model'] == 3 \
+                                   else 'Unknown'
+        # make aliases to some frequently used variables
+        if self.parameters['Model'] == 'Hydro' or \
+           self.parameters['Model'] == 'MHD':
+            self.gamma = self.parameters["Gamma"]
+            self.mu    = self.parameters.get("mu",0.6) # mean molecular weight
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            # define a unique way to identify GAMER datasets
+            f = HDF5FileHandler(args[0])
+            if 'PatchMap' in f['/'].keys():
+                return True
+        except:
+            pass
+        return False

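A side note on the global-ID bookkeeping in _parse_index() above: patches are laid out level by level, so the first global ID on each level is simply the cumulative patch count of all coarser levels. A minimal numpy sketch with hypothetical per-level counts:

    import numpy as np

    NPatch = np.array([8, 64, 512, 0])    # hypothetical patches per level
    GID0   = np.concatenate(([0], NPatch.cumsum()[:-1]))
    print(GID0)                           # -> [  0   8  72 584]
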
diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/fields.py
--- /dev/null
+++ b/yt/frontends/gamer/fields.py
@@ -0,0 +1,89 @@
+"""
+GAMER-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import FieldInfoContainer
+from yt.utilities.physical_constants import mh, boltzmann_constant_cgs
+
+b_units   = "code_magnetic"
+pre_units = "code_mass / (code_length*code_time**2)"
+erg_units = "code_mass / (code_length*code_time**2)"
+rho_units = "code_mass / code_length**3"
+mom_units = "code_mass / (code_length**2*code_time)"
+vel_units = "code_velocity"
+pot_units = "code_length**2/code_time**2"
+
+psi_units = "code_mass**(1/2) / code_length**(3/2)"
+
+
+class GAMERFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # hydro fields on disk (GAMER outputs conservative variables)
+        ( "Dens", (rho_units, ["density"],                 r"\rho") ),
+        ( "MomX", (mom_units, ["momentum_x"],              None   ) ),
+        ( "MomY", (mom_units, ["momentum_y"],              None   ) ),
+        ( "MomZ", (mom_units, ["momentum_z"],              None   ) ),
+        ( "Engy", (erg_units, ["total_energy"],            None   ) ),
+        ( "Pote", (pot_units, ["gravitational_potential"], None   ) ),
+
+        # psiDM fields on disk
+        ( "Real", (psi_units, ["real_part"],               None   ) ),
+        ( "Imag", (psi_units, ["imaginary_part"],          None   ) ),
+    )
+
+    known_particle_fields = (
+    )
+
+    def __init__(self, ds, field_list):
+        super(GAMERFieldInfo, self).__init__(ds, field_list)
+
+    # add primitive and other derived variables
+    def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
+
+        # velocity
+        def velocity_xyz(v):
+            def _velocity(field, data):
+                return data["gas", "momentum_%s"%v] / data["gas","density"]
+            return _velocity
+        for v in "xyz":
+            self.add_field( ("gas","velocity_%s"%v), function = velocity_xyz(v),
+                            units = unit_system["velocity"] )
+
+        # kinetic energy per volume
+        def ek(data):
+            return 0.5*( data["gamer","MomX"]**2 +
+                         data["gamer","MomY"]**2 +
+                         data["gamer","MomZ"]**2 ) / data["gamer","Dens"]
+
+        # thermal energy per volume
+        def et(data):
+            return data["gamer","Engy"] - ek(data)
+
+        # pressure
+        def _pressure(field, data):
+            return et(data)*(data.ds.gamma-1.0)
+        self.add_field( ("gas","pressure"), function = _pressure,
+                        units = unit_system["pressure"] )
+
+        # temperature
+        def _temperature(field, data):
+            return data.ds.mu*mh*data["gas","pressure"] / \
+                   (data["gas","density"]*boltzmann_constant_cgs)
+        self.add_field( ("gas","temperature"), function = _temperature,
+                        units = unit_system["temperature"] )
+
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass

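One detail worth spelling out in setup_fluid_fields() above: the velocity_xyz(v) factory exists because Python closures capture loop variables by reference, so defining the field function directly inside the for loop would bind all three components to the last value of v. A tiny self-contained sketch of the pitfall and the fix:

    # late binding: every callback ends up seeing v == 'z'
    broken = []
    for v in "xyz":
        broken.append(lambda: v)
    print([f() for f in broken])    # -> ['z', 'z', 'z']

    # factory (as in velocity_xyz): each callback keeps its own v
    def make(v):
        return lambda: v
    fixed = [make(v) for v in "xyz"]
    print([f() for f in fixed])     # -> ['x', 'y', 'z']
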
diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/io.py
--- /dev/null
+++ b/yt/frontends/gamer/io.py
@@ -0,0 +1,85 @@
+"""
+GAMER-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import mylog
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+class IOHandlerGAMER(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type    = "gamer"
+
+    def __init__(self, ds):
+        super(IOHandlerGAMER, self).__init__(ds)
+        self._handle      = ds._handle
+        self._field_dtype = "float64" # fixed even when FLOAT8 is off
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
+        # chunks is a list of chunks, and ptf is a dict where the keys are
+        # ptypes and the values are lists of fields.
+        pass
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # This gets called after the arrays have been allocated.  It needs to
+        # yield ((ptype, field), data) where data is the masked results of
+        # reading ptype, field and applying the selector to the data read in.
+        # Selector objects have a .select_points(x,y,z) that returns a mask, so
+        # you need to do your masking here.
+        pass
+
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks) # generator --> list
+
+        if any( (ftype != "gamer" for ftype, fname in fields) ):
+            raise NotImplementedError
+
+        rv = {}
+        for field in fields: rv[field] = np.empty( size, dtype=self._field_dtype )
+
+        ng = sum( len(c.objs) for c in chunks ) # c.objs is a list of grids
+        mylog.debug( "Reading %s cells of %s fields in %s grids",
+                     size, [f2 for f1, f2 in fields], ng )
+        offset = 0
+        for chunk in chunks:
+            data = self._read_chunk_data( chunk, fields )
+            for g in chunk.objs:
+                for field in fields:
+                    ds    = data[g.id].pop(field)
+                    # array returned from g.select (i.e., rv[field]) is flat
+                    ncell = g.select( selector, ds, rv[field], offset )
+                offset += ncell
+                data.pop(g.id)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        data = {}
+        if len(chunk.objs) == 0: return data
+
+        for g in chunk.objs:
+            data[g.id] = {}
+            LvName = 'Level_%02i'   % g.Level
+            CName  = 'Cluster_%09i' % g.CID
+            PName  = 'Patch_%09i'   % g.PID
+
+            for field in fields:
+                # transpose x-z since YT assumes that consecutive cells along z
+                # are contiguous in memory
+                data[g.id][field] \
+                = self._handle[LvName][CName][PName][ field[1] ].swapaxes(0,2)
+        return data
+

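About the swapaxes(0,2) in _read_chunk_data() above: the on-disk patch arrays are indexed [z][y][x], while yt wants x on the first axis, so the first and last axes are exchanged. A dummy-array sketch:

    import numpy as np

    zyx = np.arange(2*3*4).reshape(2, 3, 4)   # axes ordered (z, y, x)
    xyz = zyx.swapaxes(0, 2)                  # axes ordered (x, y, z)
    assert zyx[1, 2, 3] == xyz[3, 2, 1]
    print(xyz.shape)                          # -> (4, 3, 2)
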
diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 yt/frontends/gamer/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/gamer/tests/test_outputs.py
@@ -0,0 +1,63 @@
+"""
+GAMER frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.gamer.api import GAMERDataset
+
+
+
+jet         = "InteractingJets/jet_000002"
+_fields_jet = ("temperature", "density", "velocity_magnitude")
+jet_units   = {"length_unit":(1.0,"kpc"),
+               "time_unit"  :(3.08567758096e+13,"s"),
+               "mass_unit"  :(1.4690033e+36,"g")}
+
+@requires_ds(jet, big_data=True)
+def test_jet():
+    ds = data_dir_load(jet, kwargs={"units_override":jet_units})
+    yield assert_equal, str(ds), "jet_000002"
+    for test in small_patch_amr(ds, _fields_jet):
+        test_jet.__name__ = test.description
+        yield test
+
+
+psiDM         = "WaveDarkMatter/psiDM_000020"
+_fields_psiDM = ("Dens", "Real", "Imag")
+
+@requires_ds(psiDM, big_data=True)
+def test_psiDM():
+    ds = data_dir_load(psiDM)
+    yield assert_equal, str(ds), "psiDM_000020"
+    for test in small_patch_amr(ds, _fields_psiDM):
+        test_psiDM.__name__ = test.description
+        yield test
+
+
+@requires_file(psiDM)
+def test_GAMERDataset():
+    assert isinstance(data_dir_load(psiDM), GAMERDataset)
+
+
+@requires_file(jet)
+def test_units_override():
+    for test in units_override_check(jet):
+        yield test

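For context, the new frontend can be exercised end to end in a few lines; a minimal sketch reusing the sample dataset and units_override values from the tests above (the path assumes the data sits in the standard yt test-data directory):

    import yt

    jet_units = {"length_unit": (1.0, "kpc"),
                 "time_unit":   (3.08567758096e+13, "s"),
                 "mass_unit":   (1.4690033e+36, "g")}
    ds = yt.load("InteractingJets/jet_000002", units_override=jet_units)
    ds.index                        # builds the GAMERHierarchy
    print(ds.field_list)            # on-disk ('gamer', ...) fields
    print(ds.parameters['Model'])   # 'Hydro', 'MHD', 'ELBDM', or 'Unknown'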

https://bitbucket.org/yt_analysis/yt/commits/624aeedcc39f/
Changeset:   624aeedcc39f
Branch:      yt
User:        hyschive
Date:        2016-04-28 16:21:52+00:00
Summary:     Add answer test for GAMER in tests/tests.yaml
Affected #:  1 file

diff -r 54edcb134b37e2dd39b65f111a7f4b9bb47737d9 -r 624aeedcc39fc7f53acb792ec82f7113c4ea44d4 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,6 +20,9 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gamer_000:
+    - yt/frontends/gamer/tests/test_outputs.py
+
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 

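With the test data in place, the new entry can be exercised like any other answer test, e.g. `nosetests --with-answer-testing yt/frontends/gamer/tests/test_outputs.py` (assuming the standard yt answer-testing setup described in doc/source/developing/testing.rst).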

https://bitbucket.org/yt_analysis/yt/commits/178141e68361/
Changeset:   178141e68361
Branch:      yt
User:        hyschive
Date:        2016-04-28 20:12:13+00:00
Summary:     Fixing flake8 errors
Affected #:  1 file

diff -r 624aeedcc39fc7f53acb792ec82f7113c4ea44d4 -r 178141e68361b4c7a0f9efa46279e55750180a3a yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -27,7 +27,6 @@
     Dataset
 from yt.utilities.file_handler import \
     HDF5FileHandler
-from yt.utilities.physical_ratios import cm_per_mpc
 from .fields import GAMERFieldInfo
 from yt.testing import assert_equal
 
@@ -281,10 +280,11 @@
             for v in t.dtype.names: self.parameters[v] = t[v]
 
         # reset 'Model' to be more readable
-        self.parameters['Model'] =      'Hydro' if KeyInfo['Model'] == 1 \
-                                   else 'MHD'   if KeyInfo['Model'] == 2 \
-                                   else 'ELBDM' if KeyInfo['Model'] == 3 \
-                                   else 'Unknown'
+        if KeyInfo['Model'] == 1:   self.parameters['Model'] = 'Hydro'
+        elif KeyInfo['Model'] == 2: self.parameters['Model'] = 'MHD'
+        elif KeyInfo['Model'] == 3: self.parameters['Model'] = 'ELBDM'
+        else:                       self.parameters['Model'] = 'Unknown'
+
         # make aliases to some frequently used variables
         if self.parameters['Model'] == 'Hydro' or \
            self.parameters['Model'] == 'MHD':


https://bitbucket.org/yt_analysis/yt/commits/60d8b92ce7f0/
Changeset:   60d8b92ce7f0
Branch:      yt
User:        hyschive
Date:        2016-04-28 20:24:23+00:00
Summary:     Fixing minor python style for if ... elif ...
Affected #:  1 file

diff -r 178141e68361b4c7a0f9efa46279e55750180a3a -r 60d8b92ce7f0575441bf988d77446d5c3c9605e1 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -280,10 +280,14 @@
             for v in t.dtype.names: self.parameters[v] = t[v]
 
         # reset 'Model' to be more readable
-        if KeyInfo['Model'] == 1:   self.parameters['Model'] = 'Hydro'
-        elif KeyInfo['Model'] == 2: self.parameters['Model'] = 'MHD'
-        elif KeyInfo['Model'] == 3: self.parameters['Model'] = 'ELBDM'
-        else:                       self.parameters['Model'] = 'Unknown'
+        if KeyInfo['Model'] == 1:
+            self.parameters['Model'] = 'Hydro'
+        elif KeyInfo['Model'] == 2:
+            self.parameters['Model'] = 'MHD'
+        elif KeyInfo['Model'] == 3:
+            self.parameters['Model'] = 'ELBDM'
+        else:
+            self.parameters['Model'] = 'Unknown'
 
         # make aliases to some frequently used variables
         if self.parameters['Model'] == 'Hydro' or \


https://bitbucket.org/yt_analysis/yt/commits/648dc24665ab/
Changeset:   648dc24665ab
Branch:      yt
User:        hyschive
Date:        2016-04-28 21:13:37+00:00
Summary:     Adding YT document
Affected #:  2 files

diff -r 60d8b92ce7f0575441bf988d77446d5c3c9605e1 -r 648dc24665ab1575417ce7beb4f3add3f9b805cd doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -806,6 +806,20 @@
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
   other :ref:`unit systems <unit_systems>` is also possible.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format is not supported.
+
 .. _loading-gadget-data:
 
 Gadget Data

diff -r 60d8b92ce7f0575441bf988d77446d5c3c9605e1 -r 648dc24665ab1575417ce7beb4f3add3f9b805cd doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,6 +34,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |


https://bitbucket.org/yt_analysis/yt/commits/7ed56d5c8cf4/
Changeset:   7ed56d5c8cf4
Branch:      yt
User:        hyschive
Date:        2016-05-06 17:43:23+00:00
Summary:     Rewrite data_structures.py to support the new GAMER data format
Affected #:  1 file

diff -r 648dc24665ab1575417ce7beb4f3add3f9b805cd -r 7ed56d5c8cf442841dcbd4cfd826abad414566a8 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -42,8 +42,6 @@
         self.Parent   = None    # do NOT initialize Parent as []
         self.Children = []
         self.Level    = level
-        self.PID      = None    # patch ID adopted in GAMER
-        self.CID      = None    # cluster ID in the GAMER HDF5 output
 
     def __repr__(self):
         return 'GAMERGrid_%09i (dimension = %s)' % (self.id, self.ActiveDimensions)
@@ -64,18 +62,17 @@
 
     def _detect_output_fields(self):
         # find all field names in the current dataset
-        # choose an arbitrary grid since they all have the same fields
-        Grid = self._handle['Level_00']['Cluster_%09i'%0]['Patch_%09i'%0]
-        self.field_list = [ ('gamer', v) for v in Grid.dtype.names ]
+        self.field_list = [ ('gamer', v) for v in self._handle['Data'].keys() ]
     
     def _count_grids(self):
         # count the total number of patches at all levels  
         self.num_grids = self.dataset.parameters['NPatch'].sum()
         
     def _parse_index(self):
-        f    = self._handle
-        Para = self.dataset.parameters
-        GID0 = 0
+        Para   = self.dataset.parameters
+        GID0   = 0
+        CrList = self._handle['Tree/Corner'].value
+        Cr2Phy = self._handle['Tree/Corner'].attrs['Cvt2Phy']
 
         self.grid_dimensions    [:] = Para['PatchSize']
         self.grid_particle_count[:] = 0
@@ -84,16 +81,13 @@
             NP = Para['NPatch'][lv]
             if NP == 0: break
 
-            Scale  = Para['CellScale'][lv]
-            PScale = Para['PatchSize']*Scale
-            CrList = f['PatchMap'][ 'Level_%d%d'%(lv/10,lv%10) ]['CornerList'].value
-            Cr2Phy = f['PatchMap'][ 'Level_%d%d'%(lv/10,lv%10) ]['CornerList'].attrs['Cvt2Phy']
+            PScale = Para['PatchSize']*Para['CellScale'][lv]
 
             # set the level and edge of each grid
             # (left/right_edge are YT arrays in code units)
             self.grid_levels.flat[ GID0:GID0+NP ] = lv
-            self.grid_left_edge  [ GID0:GID0+NP ] = CrList[:]*Cr2Phy
-            self.grid_right_edge [ GID0:GID0+NP ] = (CrList[:] + PScale)*Cr2Phy
+            self.grid_left_edge  [ GID0:GID0+NP ] = CrList[ GID0:GID0+NP ]*Cr2Phy
+            self.grid_right_edge [ GID0:GID0+NP ] = (CrList[ GID0:GID0+NP ] + PScale)*Cr2Phy
 
             GID0 += NP
 
@@ -106,39 +100,21 @@
         self.max_level = self.grid_levels.max()
         
     def _populate_grid_objects(self):
-        f  = self._handle
-        NP = self.dataset.parameters['NPatch']
-        NC = f['SimuInfo']['KeyInfo']['H5_MaxDsetPerGroup']
+        son_list = self._handle["Tree/Son"].value
 
-        for lv in range(0, self.dataset.parameters['NLevel']):
-            if NP[lv] == 0: break
+        for gid in range(self.num_grids):
+            grid     = self.grids.flat[gid]
+            son_gid0 = son_list[gid]
 
-            # converting HDF5 dataset to numpy array in advance (using .value)
-            # is much faster than using h5py directly
-            SonList    = f['PatchMap'][ 'Level_%02i'%lv ]['SonList'].value
-#           SonList    = f['PatchMap'][ 'Level_%02i'%lv ]['SonList']
-            SonGID0    = NP[0:lv+1].sum()
-            GID0       = NP[0:lv  ].sum()
+            # set up the parent-children relationship
+            if son_gid0 >= 0:
+                grid.Children = [ self.grids.flat[son_gid0+s] for s in range(8) ]
 
-            # set the parent-children relationship
-            for PID in range(0, NP[lv]):
-                Grid   = self.grids.flat[GID0+PID]
-                CID    = PID / NC
-                SonPID = SonList[PID]
+            for son_grid in grid.Children: son_grid.Parent = grid
 
-                if SonPID >= 0:
-                    Grid.Children = [ self.grids.flat[SonGID0+SonPID+s] \
-                                      for s in range(0,8) ]
-
-                for SonGrid in Grid.Children: SonGrid.Parent = Grid
-
-                # record the patch and cluster indices
-                Grid.PID = PID
-                Grid.CID = CID
-
-                # set up other grid attributes
-                Grid._prepare_grid()
-                Grid._setup_dx()
+            # set up other grid attributes
+            grid._prepare_grid()
+            grid._setup_dx()
 
         # validate the parent-children relationship in the debug mode
         if self.dataset._debug:
@@ -148,47 +124,40 @@
     def _validate_parent_children_relationship(self):
         mylog.info('Validating the parent-children relationship ...')
 
-        f    = self._handle
-        Para = self.dataset.parameters
+        Para        = self.dataset.parameters
+        father_list = self._handle["Tree/Father"].value
 
-        for Grid in self.grids:
+        for grid in self.grids:
             # parent->children == itself
-            if Grid.Parent is not None:
-                assert Grid.Parent.Children[0+Grid.id%8] is Grid, \
+            if grid.Parent is not None:
+                assert grid.Parent.Children[0+grid.id%8] is grid, \
                        'Grid %d, Parent %d, Parent->Children %d' % \
-                       (Grid.id, Grid.Parent.id, Grid.Parent.Children[0].id)
+                       (grid.id, grid.Parent.id, grid.Parent.Children[0].id)
 
             # children->parent == itself
-            for c in Grid.Children:
-                assert c.Parent is Grid, \
+            for c in grid.Children:
+                assert c.Parent is grid, \
                        'Grid %d, Children %d, Children->Parent %d' % \
-                       (Grid.id, c.id, c.Parent.id)
+                       (grid.id, c.id, c.Parent.id)
 
             # all refinement grids should have parent 
-            if Grid.Level > 0:
-                assert Grid.Parent is not None and Grid.Parent.id >= 0, \
+            if grid.Level > 0:
+                assert grid.Parent is not None and grid.Parent.id >= 0, \
                        'Grid %d, Level %d, Parent %d' % \
-                       (Grid.id, Grid.Level, \
-                        Grid.Parent.id if Grid.Parent is not None else -999)
+                       (grid.id, grid.Level, \
+                        grid.Parent.id if grid.Parent is not None else -999)
 
             # parent index is consistent with the loaded dataset
-            if Grid.Level > 0:
-                NC     = f['SimuInfo']['KeyInfo']['H5_MaxDsetPerGroup']
-                PID    = Grid.PID
-                CID    = PID / NC
-                LvName = 'Level_%02i'   % Grid.Level
-                CName  = 'Cluster_%09i' % CID
-                PName  = 'Patch_%09i'   % PID
-                FaGID  = f[LvName][CName][PName].attrs['Info']['Father'] \
-                         + Para['NPatch'][0:Grid.Level-1].sum()
-                assert FaGID == Grid.Parent.id, \
+            if grid.Level > 0:
+                father_gid = father_list[grid.id]
+                assert father_gid == grid.Parent.id, \
                        'Grid %d, Level %d, Parent_Found %d, Parent_Expect %d'%\
-                       (Grid.id, Grid.Level, Grid.Parent.id, FaGID)
+                       (grid.id, grid.Level, grid.Parent.id, FaGID)
 
             # edges between children and parent
-            if len(Grid.Children) > 0:
-                assert_equal(Grid.LeftEdge,  Grid.Children[0].LeftEdge )
-                assert_equal(Grid.RightEdge, Grid.Children[7].RightEdge)
+            if len(grid.Children) > 0:
+                assert_equal(grid.LeftEdge,  grid.Children[0].LeftEdge )
+                assert_equal(grid.RightEdge, grid.Children[7].RightEdge)
         mylog.info('Check passed')
                
 
@@ -196,8 +165,7 @@
     _index_class      = GAMERHierarchy
     _field_info_class = GAMERFieldInfo
     _handle           = None
-#   _debug            = True  # turn on the debug mode
-    _debug            = False # turn on the debug mode
+    _debug            = True  # turn on/off the debug mode
     
     def __init__(self, filename,
                  dataset_type      = 'gamer',
@@ -245,10 +213,10 @@
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         # shortcuts for different simulation information
-        KeyInfo   = self._handle['SimuInfo']['KeyInfo']
-        InputPara = self._handle['SimuInfo']['InputPara']
-        Makefile  = self._handle['SimuInfo']['Makefile']
-        SymConst  = self._handle['SimuInfo']['SymConst']
+        KeyInfo   = self._handle['Info']['KeyInfo']
+        InputPara = self._handle['Info']['InputPara']
+        Makefile  = self._handle['Info']['Makefile']
+        SymConst  = self._handle['Info']['SymConst']
 
         # simulation time and domain
         self.current_time      = KeyInfo['Time'][0]
@@ -300,7 +268,7 @@
         try:
             # define a unique way to identify GAMER datasets
             f = HDF5FileHandler(args[0])
-            if 'PatchMap' in f['/'].keys():
+            if 'Info' in f['/'].keys() and 'KeyInfo' in f['/Info'].keys():
                 return True
         except:
             pass

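To make the reorganized on-disk layout concrete, here is a rough h5py sketch of the groups and datasets the rewritten code now reads ('Data', 'Tree/Corner', 'Tree/Son', 'Info'); the dataset names come from the diff above, and the file name is a placeholder:

    import h5py

    with h5py.File("jet_000002", "r") as f:
        fields  = list(f["Data"].keys())             # one dataset per on-disk field
        corner  = f["Tree/Corner"][()]               # patch corners in cell scales
        cvt2phy = f["Tree/Corner"].attrs["Cvt2Phy"]  # scale -> code length
        son     = f["Tree/Son"][()]                  # first-child GID, < 0 for leaves
        left_edge = corner * cvt2phy                 # code-unit left edges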

https://bitbucket.org/yt_analysis/yt/commits/04b6be68c57d/
Changeset:   04b6be68c57d
Branch:      yt
User:        hyschive
Date:        2016-05-07 01:02:16+00:00
Summary:     Revise io.py to support the new GAMER output format
Affected #:  2 files

diff -r 7ed56d5c8cf442841dcbd4cfd826abad414566a8 -r 04b6be68c57d6b6ed0b71d2538a2f0a0c8a299ee yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -48,7 +48,8 @@
 
 
 class GAMERHierarchy(GridIndex):
-    grid = GAMERGrid
+    grid                 = GAMERGrid
+    _preload_implemented = True # since gamer defines "_read_chunk_data" in io.py
     
     def __init__(self, ds, dataset_type = 'gamer'):
         self.dataset_type     = dataset_type
@@ -165,7 +166,7 @@
     _index_class      = GAMERHierarchy
     _field_info_class = GAMERFieldInfo
     _handle           = None
-    _debug            = True  # turn on/off the debug mode
+    _debug            = False # turn on/off the debug mode
     
     def __init__(self, filename,
                  dataset_type      = 'gamer',

diff -r 7ed56d5c8cf442841dcbd4cfd826abad414566a8 -r 04b6be68c57d6b6ed0b71d2538a2f0a0c8a299ee yt/frontends/gamer/io.py
--- a/yt/frontends/gamer/io.py
+++ b/yt/frontends/gamer/io.py
@@ -14,9 +14,24 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.funcs import mylog
+from itertools import groupby
+
 from yt.utilities.io_handler import \
     BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.selection_routines import AlwaysSelector
+
+
+#-----------------------------------------------------------------------------
+# GAMER shares a similar HDF5 format with FLASH, and thus a similar io.py as well
+#-----------------------------------------------------------------------------
+
+
+# group grids with consecutive indices together to improve the I/O performance
+def grid_sequences(grids):
+    for k, g in groupby( enumerate(grids), lambda i_x1:i_x1[0]-i_x1[1].id ):
+        seq = list(v[1] for v in g)
+        yield seq
 
 class IOHandlerGAMER(BaseIOHandler):
     _particle_reader = False
@@ -28,20 +43,11 @@
         self._field_dtype = "float64" # fixed even when FLOAT8 is off
 
     def _read_particle_coords(self, chunks, ptf):
-        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
-        # chunks is a list of chunks, and ptf is a dict where the keys are
-        # ptypes and the values are lists of fields.
         pass
 
     def _read_particle_fields(self, chunks, ptf, selector):
-        # This gets called after the arrays have been allocated.  It needs to
-        # yield ((ptype, field), data) where data is the masked results of
-        # reading ptype, field and applying the selector to the data read in.
-        # Selector objects have a .select_points(x,y,z) that returns a mask, so
-        # you need to do your masking here.
         pass
 
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks) # generator --> list
 
@@ -54,32 +60,32 @@
         ng = sum( len(c.objs) for c in chunks ) # c.objs is a list of grids
         mylog.debug( "Reading %s cells of %s fields in %s grids",
                      size, [f2 for f1, f2 in fields], ng )
-        offset = 0
-        for chunk in chunks:
-            data = self._read_chunk_data( chunk, fields )
-            for g in chunk.objs:
-                for field in fields:
-                    ds    = data[g.id].pop(field)
-                    # array returned from g.select (i.e., rv[field]) is flat
-                    ncell = g.select( selector, ds, rv[field], offset )
-                offset += ncell
-                data.pop(g.id)
+
+        for field in fields:
+            ds     = self._handle[ "/Data/%s" % field[1] ]
+            offset = 0
+            for chunk in chunks:
+                for gs in grid_sequences(chunk.objs):
+                    start = gs[ 0].id
+                    end   = gs[-1].id + 1
+                    data  = ds[start:end,:,:,:].transpose()
+                    for i, g in enumerate(gs):
+                        offset += g.select( selector, data[...,i], rv[field], offset )
         return rv
 
     def _read_chunk_data(self, chunk, fields):
-        data = {}
-        if len(chunk.objs) == 0: return data
+        rv = {}
+        if len(chunk.objs) == 0: return rv 
 
-        for g in chunk.objs:
-            data[g.id] = {}
-            LvName = 'Level_%02i'   % g.Level
-            CName  = 'Cluster_%09i' % g.CID
-            PName  = 'Patch_%09i'   % g.PID
+        for g in chunk.objs: rv[g.id] = {}
 
-            for field in fields:
-                # transpose x-z since YT assumes that consecutive cells along z
-                # are contiguous in memory
-                data[g.id][field] \
-                = self._handle[LvName][CName][PName][ field[1] ].swapaxes(0,2)
-        return data
+        for field in fields:
+            ds = f[ "/Data/%s" % field[1] ]
 
+            for gs in grid_sequences(chunk.objs):
+                start = gs[ 0].id
+                end   = gs[-1].id + 1
+                data  = ds[start:end,:,:,:].transpose()
+                for i, g in enumerate(gs):
+                    rv[g.id][field] = np.asarray( data[...,i], dtype=self._field_dtype )
+        return rv

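The grid_sequences() helper above leans on a classic itertools idiom: enumerate() pairs each grid with its position, and position minus grid ID is constant exactly along a run of consecutive IDs, so groupby() splits a chunk into contiguous ranges that can each be read with a single HDF5 slice. A sketch with plain integers standing in for grids:

    from itertools import groupby

    ids = [0, 1, 2, 5, 6, 9]
    for _, run in groupby(enumerate(ids), lambda i_x: i_x[0] - i_x[1]):
        print([v[1] for v in run])    # -> [0, 1, 2], then [5, 6], then [9]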

https://bitbucket.org/yt_analysis/yt/commits/c6788b76342d/
Changeset:   c6788b76342d
Branch:      yt
User:        hyschive
Date:        2016-05-07 02:17:21+00:00
Summary:     Fix the typo in io.py
Affected #:  1 file

diff -r 04b6be68c57d6b6ed0b71d2538a2f0a0c8a299ee -r c6788b76342d85496891318350ffe8e58e52fb49 yt/frontends/gamer/io.py
--- a/yt/frontends/gamer/io.py
+++ b/yt/frontends/gamer/io.py
@@ -80,7 +80,7 @@
         for g in chunk.objs: rv[g.id] = {}
 
         for field in fields:
-            ds = f[ "/Data/%s" % field[1] ]
+            ds = self._handle[ "/Data/%s" % field[1] ]
 
             for gs in grid_sequences(chunk.objs):
                 start = gs[ 0].id


https://bitbucket.org/yt_analysis/yt/commits/8a110e6a0156/
Changeset:   8a110e6a0156
Branch:      yt
User:        hyschive
Date:        2016-05-07 21:39:08+00:00
Summary:     Rename internal variables, add thermal_energy and total_energy fields, update doc
Affected #:  5 files

diff -r c6788b76342d85496891318350ffe8e58e52fb49 -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 

diff -r c6788b76342d85496891318350ffe8e58e52fb49 -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -816,9 +816,23 @@
    import yt
    ds = yt.load("InteractingJets/jet_000002")
 
+Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
 .. rubric:: Caveats
 
-* GAMER data in raw binary format is not supported.
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
 
 .. _loading-gadget-data:
 

diff -r c6788b76342d85496891318350ffe8e58e52fb49 -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -58,7 +58,7 @@
         self.directory        = os.path.dirname(self.index_filename)
         self._handle          = ds._handle
         self.float_type       = 'float64' # fixed even when FLOAT8 is off
-#       self._particle_handle = ds._particle_handle
+        self._particle_handle = ds._particle_handle
         GridIndex.__init__(self, ds, dataset_type)
 
     def _detect_output_fields(self):
@@ -70,27 +70,29 @@
         self.num_grids = self.dataset.parameters['NPatch'].sum()
         
     def _parse_index(self):
-        Para   = self.dataset.parameters
-        GID0   = 0
-        CrList = self._handle['Tree/Corner'].value
-        Cr2Phy = self._handle['Tree/Corner'].attrs['Cvt2Phy']
+        parameters       = self.dataset.parameters
+        gid0             = 0
+        grid_corner      = self._handle['Tree/Corner'].value
+        convert2physical = self._handle['Tree/Corner'].attrs['Cvt2Phy']
 
-        self.grid_dimensions    [:] = Para['PatchSize']
+        self.grid_dimensions    [:] = parameters['PatchSize']
         self.grid_particle_count[:] = 0
 
-        for lv in range(0, Para['NLevel']):
-            NP = Para['NPatch'][lv]
-            if NP == 0: break
+        for lv in range(0, parameters['NLevel']):
+            num_grids_level = parameters['NPatch'][lv]
+            if num_grids_level == 0: break
 
-            PScale = Para['PatchSize']*Para['CellScale'][lv]
+            patch_scale = parameters['PatchSize']*parameters['CellScale'][lv]
 
             # set the level and edge of each grid
             # (left/right_edge are YT arrays in code units)
-            self.grid_levels.flat[ GID0:GID0+NP ] = lv
-            self.grid_left_edge  [ GID0:GID0+NP ] = CrList[ GID0:GID0+NP ]*Cr2Phy
-            self.grid_right_edge [ GID0:GID0+NP ] = (CrList[ GID0:GID0+NP ] + PScale)*Cr2Phy
+            self.grid_levels.flat[ gid0:gid0 + num_grids_level ] = lv
+            self.grid_left_edge[ gid0:gid0 + num_grids_level ] \
+                = grid_corner[ gid0:gid0 + num_grids_level ]*convert2physical
+            self.grid_right_edge[ gid0:gid0 + num_grids_level ] \
+                = (grid_corner[ gid0:gid0 + num_grids_level ] + patch_scale)*convert2physical
 
-            GID0 += NP
+            gid0 += num_grids_level
 
         # allocate all grid objects
         self.grids = np.empty(self.num_grids, dtype='object')
@@ -125,7 +127,6 @@
     def _validate_parent_children_relationship(self):
         mylog.info('Validating the parent-children relationship ...')
 
-        Para        = self.dataset.parameters
         father_list = self._handle["Tree/Father"].value
 
         for grid in self.grids:
@@ -153,7 +154,7 @@
                 father_gid = father_list[grid.id]
                 assert father_gid == grid.Parent.id, \
                        'Grid %d, Level %d, Parent_Found %d, Parent_Expect %d'%\
-                       (grid.id, grid.Level, grid.Parent.id, FaGID)
+                       (grid.id, grid.Level, grid.Parent.id, father_gid)
 
             # edges between children and parent
             if len(grid.Children) > 0:
@@ -166,7 +167,7 @@
     _index_class      = GAMERHierarchy
     _field_info_class = GAMERFieldInfo
     _handle           = None
-    _debug            = False # turn on/off the debug mode
+    _debug            = False # debug mode for the GAMER frontend
     
     def __init__(self, filename,
                  dataset_type      = 'gamer',
@@ -227,7 +228,7 @@
         self.domain_dimensions = KeyInfo['NX0'].astype('int64')
 
         # periodicity
-        periodic = InputPara['Opt__BC_Flu'][0] == 0
+        periodic         = InputPara['Opt__BC_Flu'][0] == 0
         self.periodicity = (periodic,periodic,periodic)
 
         # cosmological parameters

diff -r c6788b76342d85496891318350ffe8e58e52fb49 -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 yt/frontends/gamer/fields.py
--- a/yt/frontends/gamer/fields.py
+++ b/yt/frontends/gamer/fields.py
@@ -34,12 +34,12 @@
         ( "MomX", (mom_units, ["momentum_x"],              None   ) ),
         ( "MomY", (mom_units, ["momentum_y"],              None   ) ),
         ( "MomZ", (mom_units, ["momentum_z"],              None   ) ),
-        ( "Engy", (erg_units, ["total_energy"],            None   ) ),
+        ( "Engy", (erg_units, ["total_energy_per_volume"], None   ) ),
         ( "Pote", (pot_units, ["gravitational_potential"], None   ) ),
 
         # psiDM fields on disk
-        ( "Real", (psi_units, ["real_part"],               None   ) ),
-        ( "Imag", (psi_units, ["imaginary_part"],          None   ) ),
+        ( "Real", (psi_units, ["psidm_real_part"],         None   ) ),
+        ( "Imag", (psi_units, ["psidm_imaginary_part"],    None   ) ),
     )
 
     known_particle_fields = (
@@ -61,6 +61,15 @@
             self.add_field( ("gas","velocity_%s"%v), function = velocity_xyz(v),
                             units = unit_system["velocity"] )
 
+        # ============================================================================
+        # note that yt internal fields assume
+        #    [thermal_energy]          = [energy per mass]
+        #    [kinetic_energy]          = [energy per volume]
+        # and we further adopt
+        #    [total_energy]            = [energy per mass]
+        #    [total_energy_per_volume] = [energy per volume]
+        # ============================================================================
+
         # kinetic energy per volume
         def ek(data):
             return 0.5*( data["gamer","MomX"]**2 +
@@ -71,6 +80,18 @@
         def et(data):
             return data["gamer","Engy"] - ek(data)
 
+        # thermal energy per mass (i.e., specific)
+        def _thermal_energy(field, data):
+            return et(data) / data["gamer","Dens"]
+        self.add_field( ("gas","thermal_energy"), function = _thermal_energy,
+                        units = unit_system["specific_energy"] )
+
+        # total energy per mass
+        def _total_energy(field, data):
+            return data["gamer","Engy"] / data["gamer","Dens"]
+        self.add_field( ("gas","total_energy"), function = _total_energy,
+                        units = unit_system["specific_energy"] )
+
         # pressure
         def _pressure(field, data):
             return et(data)*(data.ds.gamma-1.0)

diff -r c6788b76342d85496891318350ffe8e58e52fb49 -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 yt/frontends/gamer/io.py
--- a/yt/frontends/gamer/io.py
+++ b/yt/frontends/gamer/io.py
@@ -19,7 +19,6 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
-from yt.geometry.selection_routines import AlwaysSelector
 
 
 #-----------------------------------------------------------------------------

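A short sketch exercising the energy fields renamed and added in this changeset; the dataset path and units_override values are the ones from the updated docs above:

    import yt

    code_units = {"length_unit": (1.0, "kpc"),
                  "time_unit":   (3.08567758096e+13, "s"),
                  "mass_unit":   (1.4690033e+36, "g")}
    ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
    ad = ds.all_data()
    print(ad["gas", "thermal_energy"].units)   # specific (per-mass) energy
    print(ad["gas", "total_energy"].units)     # specific (per-mass) energy
    print(ad["gamer", "Engy"].units)           # energy per volume, in code units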

https://bitbucket.org/yt_analysis/yt/commits/e551ce706857/
Changeset:   e551ce706857
Branch:      yt
User:        hyschive
Date:        2016-05-08 22:28:46+00:00
Summary:     Merged with changeset 7f7ccbc9329e
Affected #:  46 files

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in units of solar metallicity.
+   Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)
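
The pattern both hunks adopt, sketched on its own: run the child process with
``Popen``, forward its captured output, and attach the child's stderr to the
raised error so the real failure shows up in the test log. The helper name is
illustrative; it assumes ``sys`` is imported at module level, as in the edited
file:

    import subprocess
    import sys

    def run_and_report(cmd):
        # capture stdout/stderr instead of letting them interleave with
        # the harness output
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if out:
            sys.stdout.write(out.decode("utf-8"))
        if err:
            sys.stderr.write(err.decode("utf-8"))
        if proc.returncode != 0:
            # surface the child's stderr in the exception message
            msg = " ".join(cmd) + "\n\n" + err.decode("utf-8")
            raise subprocess.CalledProcessError(proc.returncode, msg)
        return proc.returncode

    run_and_report(["python", "-c", "print('ok')"])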

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -538,7 +538,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for the OWLS frontend.
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``), 
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing 
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essentials`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using ``conda``.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OSX, or on a cluster that has a
+very out of date installation of OpenSSL. In both of these cases you will either
+need to install OpenSSL yourself from the system package manager or consider
+using ``INST_CONDA=1``, since conda-based installs can install the conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. .bashrc, .bash_profile, .cshrc, .zshrc)
+to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script to activate
+the yt environment in a terminal session; otherwise yt will not be properly
+recognized by your system. You can either add it to your login script, or
+execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install latest development version of yt, you can download
 it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for development branch are built after every pull request is
 merged. In order to make sure you are running latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 Location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
+.. _conda-source-build:
 
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
-
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy ``librockstar.so`` into the ``lib`` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+``librockstar.so`` is in your ``LD_LIBRARY_PATH``:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+- ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
-- ``mercurial``
-
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
+  $ pip install numpy matplotlib cython sympy
 
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-.. code-block:: bash
-
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `This
+  StackOverflow discussion
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
   if you are curious why ``--prefix=`` is necessary on some systems.
 
-.. note::
-
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
-
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX). Please refer to
 the ``setuptools`` documentation for the additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your ``PATH``. Some Linux distributions do not include this directory
+in the default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting the
+output:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see
+:ref:`nightly-conda-builds`), or you can follow the instructions above to build
+yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -394,7 +394,8 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / 
+                               resolution) ).clip(0, np.inf) ) ).astype('int')
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
             # a note to the user about which lines components are unresolved
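
Why the explicit cast matters, as a standalone sketch with made-up numbers:
``10 ** np.ceil(...)`` returns a float array, which cannot be used directly as
a count of virtual sub-bins:

    import numpy as np

    resolution = np.array([0.37, 12.0])
    subgrid_resolution = 10
    n = 10 ** (np.ceil(np.log10(subgrid_resolution / resolution)).clip(0, np.inf))
    print(n.dtype)           # float64 before the fix
    print(n.astype('int'))   # integer sub-bin counts, as the code now uses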

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -176,7 +176,7 @@
     constant_metallicity: float, optional
         If specified, assume a constant metallicity for the emission 
         from metals.  The *with_metals* keyword must be set to False 
-        to use this.
+        to use this. It should be given in unit of solar metallicity.
         Default: None.
 
     This will create three fields:
@@ -245,7 +245,7 @@
 
     emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/cm**3/s")
 
     def _luminosity_field(field, data):
@@ -253,7 +253,7 @@
 
     lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
                  units="erg/s")
 
     def _photon_emissivity_field(field, data):
@@ -273,7 +273,7 @@
 
     phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
     ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
                  units="photons/cm**3/s")
 
     return emiss_name, lum_name, phot_name

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/extern/functools32.py
--- a/yt/extern/functools32.py
+++ /dev/null
@@ -1,423 +0,0 @@
-"""functools.py - Tools for working with functions and callable objects
-"""
-# Python module wrapper for _functools C module
-# to allow utilities written in Python to be added
-# to the functools module.
-# Written by Nick Coghlan <ncoghlan at gmail.com>
-# and Raymond Hettinger <python at rcn.com>
-#   Copyright (C) 2006-2010 Python Software Foundation.
-# See C source code for _functools credits/copyright
-
-__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
-           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
-
-from _functools import partial, reduce
-from collections import MutableMapping, namedtuple
-from .reprlib32 import recursive_repr as _recursive_repr
-from weakref import proxy as _proxy
-import sys as _sys
-try:
-    from _thread import allocate_lock as Lock
-except:
-    from ._dummy_thread32 import allocate_lock as Lock
-
-################################################################################
-### OrderedDict
-################################################################################
-
-class _Link(object):
-    __slots__ = 'prev', 'next', 'key', '__weakref__'
-
-class OrderedDict(dict):
-    'Dictionary that remembers insertion order'
-    # An inherited dict maps keys to values.
-    # The inherited dict provides __getitem__, __len__, __contains__, and get.
-    # The remaining methods are order-aware.
-    # Big-O running times for all methods are the same as regular dictionaries.
-
-    # The internal self.__map dict maps keys to links in a doubly linked list.
-    # The circular doubly linked list starts and ends with a sentinel element.
-    # The sentinel element never gets deleted (this simplifies the algorithm).
-    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
-    # The prev links are weakref proxies (to prevent circular references).
-    # Individual links are kept alive by the hard reference in self.__map.
-    # Those hard references disappear when a key is deleted from an OrderedDict.
-
-    def __init__(self, *args, **kwds):
-        '''Initialize an ordered dictionary.  The signature is the same as
-        regular dictionaries, but keyword arguments are not recommended because
-        their insertion order is arbitrary.
-
-        '''
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__root
-        except AttributeError:
-            self.__hardroot = _Link()
-            self.__root = root = _proxy(self.__hardroot)
-            root.prev = root.next = root
-            self.__map = {}
-        self.__update(*args, **kwds)
-
-    def __setitem__(self, key, value,
-                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
-        'od.__setitem__(i, y) <==> od[i]=y'
-        # Setting a new item creates a new link at the end of the linked list,
-        # and the inherited dictionary is updated with the new key/value pair.
-        if key not in self:
-            self.__map[key] = link = Link()
-            root = self.__root
-            last = root.prev
-            link.prev, link.next, link.key = last, root, key
-            last.next = link
-            root.prev = proxy(link)
-        dict_setitem(self, key, value)
-
-    def __delitem__(self, key, dict_delitem=dict.__delitem__):
-        'od.__delitem__(y) <==> del od[y]'
-        # Deleting an existing item uses self.__map to find the link which gets
-        # removed by updating the links in the predecessor and successor nodes.
-        dict_delitem(self, key)
-        link = self.__map.pop(key)
-        link_prev = link.prev
-        link_next = link.next
-        link_prev.next = link_next
-        link_next.prev = link_prev
-
-    def __iter__(self):
-        'od.__iter__() <==> iter(od)'
-        # Traverse the linked list in order.
-        root = self.__root
-        curr = root.next
-        while curr is not root:
-            yield curr.key
-            curr = curr.next
-
-    def __reversed__(self):
-        'od.__reversed__() <==> reversed(od)'
-        # Traverse the linked list in reverse order.
-        root = self.__root
-        curr = root.prev
-        while curr is not root:
-            yield curr.key
-            curr = curr.prev
-
-    def clear(self):
-        'od.clear() -> None.  Remove all items from od.'
-        root = self.__root
-        root.prev = root.next = root
-        self.__map.clear()
-        dict.clear(self)
-
-    def popitem(self, last=True):
-        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
-        Pairs are returned in LIFO order if last is true or FIFO order if false.
-
-        '''
-        if not self:
-            raise KeyError('dictionary is empty')
-        root = self.__root
-        if last:
-            link = root.prev
-            link_prev = link.prev
-            link_prev.next = root
-            root.prev = link_prev
-        else:
-            link = root.next
-            link_next = link.next
-            root.next = link_next
-            link_next.prev = root
-        key = link.key
-        del self.__map[key]
-        value = dict.pop(self, key)
-        return key, value
-
-    def move_to_end(self, key, last=True):
-        '''Move an existing element to the end (or beginning if last==False).
-
-        Raises KeyError if the element does not exist.
-        When last=True, acts like a fast version of self[key]=self.pop(key).
-
-        '''
-        link = self.__map[key]
-        link_prev = link.prev
-        link_next = link.next
-        link_prev.next = link_next
-        link_next.prev = link_prev
-        root = self.__root
-        if last:
-            last = root.prev
-            link.prev = last
-            link.next = root
-            last.next = root.prev = link
-        else:
-            first = root.next
-            link.prev = root
-            link.next = first
-            root.next = first.prev = link
-
-    def __sizeof__(self):
-        sizeof = _sys.getsizeof
-        n = len(self) + 1                       # number of links including root
-        size = sizeof(self.__dict__)            # instance dictionary
-        size += sizeof(self.__map) * 2          # internal dict and inherited dict
-        size += sizeof(self.__hardroot) * n     # link objects
-        size += sizeof(self.__root) * n         # proxy objects
-        return size
-
-    update = __update = MutableMapping.update
-    keys = MutableMapping.keys
-    values = MutableMapping.values
-    items = MutableMapping.items
-    __ne__ = MutableMapping.__ne__
-
-    __marker = object()
-
-    def pop(self, key, default=__marker):
-        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
-        value.  If key is not found, d is returned if given, otherwise KeyError
-        is raised.
-
-        '''
-        if key in self:
-            result = self[key]
-            del self[key]
-            return result
-        if default is self.__marker:
-            raise KeyError(key)
-        return default
-
-    def setdefault(self, key, default=None):
-        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
-        if key in self:
-            return self[key]
-        self[key] = default
-        return default
-
-    @_recursive_repr()
-    def __repr__(self):
-        'od.__repr__() <==> repr(od)'
-        if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
-
-    def __reduce__(self):
-        'Return state information for pickling'
-        items = [[k, self[k]] for k in self]
-        inst_dict = vars(self).copy()
-        for k in vars(OrderedDict()):
-            inst_dict.pop(k, None)
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def copy(self):
-        'od.copy() -> a shallow copy of od'
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
-        If not specified, the value defaults to None.
-
-        '''
-        self = cls()
-        for key in iterable:
-            self[key] = value
-        return self
-
-    def __eq__(self, other):
-        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
-        while comparison to a regular mapping is order-insensitive.
-
-        '''
-        if isinstance(other, OrderedDict):
-            return len(self)==len(other) and \
-                   all(p==q for p, q in zip(self.items(), other.items()))
-        return dict.__eq__(self, other)
-
-# update_wrapper() and wraps() are tools to help write
-# wrapper functions that can handle naive introspection
-
-WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
-WRAPPER_UPDATES = ('__dict__',)
-def update_wrapper(wrapper,
-                   wrapped,
-                   assigned = WRAPPER_ASSIGNMENTS,
-                   updated = WRAPPER_UPDATES):
-    """Update a wrapper function to look like the wrapped function
-
-       wrapper is the function to be updated
-       wrapped is the original function
-       assigned is a tuple naming the attributes assigned directly
-       from the wrapped function to the wrapper function (defaults to
-       functools.WRAPPER_ASSIGNMENTS)
-       updated is a tuple naming the attributes of the wrapper that
-       are updated with the corresponding attribute from the wrapped
-       function (defaults to functools.WRAPPER_UPDATES)
-    """
-    wrapper.__wrapped__ = wrapped
-    for attr in assigned:
-        try:
-            value = getattr(wrapped, attr)
-        except AttributeError:
-            pass
-        else:
-            setattr(wrapper, attr, value)
-    for attr in updated:
-        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
-    # Return the wrapper so this can be used as a decorator via partial()
-    return wrapper
-
-def wraps(wrapped,
-          assigned = WRAPPER_ASSIGNMENTS,
-          updated = WRAPPER_UPDATES):
-    """Decorator factory to apply update_wrapper() to a wrapper function
-
-       Returns a decorator that invokes update_wrapper() with the decorated
-       function as the wrapper argument and the arguments to wraps() as the
-       remaining arguments. Default arguments are as for update_wrapper().
-       This is a convenience function to simplify applying partial() to
-       update_wrapper().
-    """
-    return partial(update_wrapper, wrapped=wrapped,
-                   assigned=assigned, updated=updated)
-
-def total_ordering(cls):
-    """Class decorator that fills in missing ordering methods"""
-    convert = {
-        '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
-                   ('__le__', lambda self, other: self < other or self == other),
-                   ('__ge__', lambda self, other: not self < other)],
-        '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
-                   ('__lt__', lambda self, other: self <= other and not self == other),
-                   ('__gt__', lambda self, other: not self <= other)],
-        '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
-                   ('__ge__', lambda self, other: self > other or self == other),
-                   ('__le__', lambda self, other: not self > other)],
-        '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
-                   ('__gt__', lambda self, other: self >= other and not self == other),
-                   ('__lt__', lambda self, other: not self >= other)]
-    }
-    roots = set(dir(cls)) & set(convert)
-    if not roots:
-        raise ValueError('must define at least one ordering operation: < ><= >=')
-    root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
-    for opname, opfunc in convert[root]:
-        if opname not in roots:
-            opfunc.__name__ = opname
-            opfunc.__doc__ = getattr(int, opname).__doc__
-            setattr(cls, opname, opfunc)
-    return cls
-
-def cmp_to_key(mycmp):
-    """Convert a cmp= function into a key= function"""
-    class K(object):
-        __slots__ = ['obj']
-        def __init__(self, obj):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) < 0
-        def __gt__(self, other):
-            return mycmp(self.obj, other.obj) > 0
-        def __eq__(self, other):
-            return mycmp(self.obj, other.obj) == 0
-        def __le__(self, other):
-            return mycmp(self.obj, other.obj) <= 0
-        def __ge__(self, other):
-            return mycmp(self.obj, other.obj) >= 0
-        def __ne__(self, other):
-            return mycmp(self.obj, other.obj) != 0
-        __hash__ = None
-    return K
-
-_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
-
-def lru_cache(maxsize=100):
-    """Least-recently-used cache decorator.
-
-    If *maxsize* is set to None, the LRU features are disabled and the cache
-    can grow without bound.
-
-    Arguments to the cached function must be hashable.
-
-    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
-    f.cache_info().  Clear the cache and statistics with f.cache_clear().
-    Access the underlying function with f.__wrapped__.
-
-    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
-
-    """
-    # Users should only access the lru_cache through its public API:
-    #       cache_info, cache_clear, and f.__wrapped__
-    # The internals of the lru_cache are encapsulated for thread safety and
-    # to allow the implementation to change (including a possible C version).
-
-    def decorating_function(user_function,
-                tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
-
-        hits, misses = [0], [0]
-        kwd_mark = (object(),)          # separates positional and keyword args
-        lock = Lock()                   # needed because OrderedDict isn't threadsafe
-
-        if maxsize is None:
-            cache = dict()              # simple cache without ordering or size limit
-
-            @wraps(user_function)
-            def wrapper(*args, **kwds):
-                key = args
-                if kwds:
-                    key += kwd_mark + tuple(sorted(kwds.items()))
-                try:
-                    result = cache[key]
-                    hits[0] += 1
-                    return result
-                except KeyError:
-                    pass
-                result = user_function(*args, **kwds)
-                cache[key] = result
-                misses[0] += 1
-                return result
-        else:
-            cache = OrderedDict()           # ordered least recent to most recent
-            cache_popitem = cache.popitem
-            cache_renew = cache.move_to_end
-
-            @wraps(user_function)
-            def wrapper(*args, **kwds):
-                key = args
-                if kwds:
-                    key += kwd_mark + tuple(sorted(kwds.items()))
-                with lock:
-                    try:
-                        result = cache[key]
-                        cache_renew(key)    # record recent use of this key
-                        hits[0] += 1
-                        return result
-                    except KeyError:
-                        pass
-                result = user_function(*args, **kwds)
-                with lock:
-                    cache[key] = result     # record recent use of this key
-                    misses[0] += 1
-                    if len(cache) > maxsize:
-                        cache_popitem(0)    # purge least recently used cache entry
-                return result
-
-        def cache_info():
-            """Report cache statistics"""
-            with lock:
-                return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
-
-        def cache_clear():
-            """Clear the cache and cache statistics"""
-            with lock:
-                cache.clear()
-                hits[0] = misses[0] = 0
-
-        wrapper.cache_info = cache_info
-        wrapper.cache_clear = cache_clear
-        return wrapper
-
-    return decorating_function

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -79,8 +79,8 @@
     ("TangentialVelocity",               "tangential_velocity"),
     ("CuttingPlaneVelocityX",            "cutting_plane_velocity_x"),
     ("CuttingPlaneVelocityY",            "cutting_plane_velocity_y"),
-    ("CuttingPlaneBX",                   "cutting_plane_bx"),
-    ("CuttingPlaneBy",                   "cutting_plane_by"),
+    ("CuttingPlaneBX",                   "cutting_plane_magnetic_field_x"),
+    ("CuttingPlaneBy",                   "cutting_plane_magnetic_field_y"),
     ("MeanMolecularWeight",              "mean_molecular_weight"),
     ("particle_density",                 "particle_density"),
     ("ThermalEnergy",                    "thermal_energy"),

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -54,6 +54,7 @@
     unit_system = registry.ds.unit_system
 
     create_vector_fields(registry, "velocity", unit_system["velocity"], ftype, slice_info)
+    create_vector_fields(registry, "magnetic_field", unit_system["magnetic_field"], ftype, slice_info)
 
     def _cell_mass(field, data):
         return data[ftype, "density"] * data[ftype, "cell_volume"]
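
With ``magnetic_field`` registered through ``create_vector_fields``, the usual
derived vector fields (for example the magnitude) become available on datasets
that provide the components. A hedged sketch; the dataset name is hypothetical:

    import yt

    ds = yt.load("MHD_dataset_with_Bfields")   # hypothetical dataset
    ad = ds.all_data()
    # generated from the magnetic_field_x/y/z components by
    # create_vector_fields, just as velocity_magnitude is for velocity
    print(ad["gas", "magnetic_field_magnitude"])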

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -25,7 +25,10 @@
 class LocalFieldInfoContainer(FieldInfoContainer):
     def add_field(self, name, function=None, **kwargs):
         if not isinstance(name, tuple):
-            name = ('gas', name)
+            if kwargs.setdefault('particle_type', False):
+                name = ('all', name)
+            else:
+                name = ('gas', name)
         override = kwargs.get("force_override", False)
         # Handle the case where the field has already been added.
         if not override and name in self:
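
The effect of this change, sketched with the top-level ``yt.add_field``
wrapper: a plain string field name now lands in the ``('all', ...)`` namespace
when ``particle_type=True`` is passed, instead of always defaulting to
``('gas', ...)``. The field name and function are illustrative:

    import yt

    def _doubled_mass(field, data):
        return 2.0 * data["all", "particle_mass"]

    # registered as ('all', 'doubled_mass') because particle_type=True;
    # without the keyword it would become ('gas', 'doubled_mass')
    yt.add_field("doubled_mass", function=_doubled_mass,
                 units="g", particle_type=True)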

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -183,13 +183,15 @@
     def _cp_vectors(ax):
         def _cp_val(field, data):
             vec = data.get_field_parameter("cp_%s_vec" % (ax))
-            bv = data.get_field_parameter("bulk_%s" % basename)
-            if bv is None: bv = np.zeros(3)
-            tr  = (data[xn] - bv[0]) * vec[0]
-            tr += (data[yn] - bv[1]) * vec[1]
-            tr += (data[zn] - bv[2]) * vec[2]
+            bv = data.get_field_parameter("bulk_%s" % basename, None)
+            if bv is None:
+                bv = data.ds.arr(np.zeros(3), data[xn].units)
+            tr  = (data[xn] - bv[0]) * vec.d[0]
+            tr += (data[yn] - bv[1]) * vec.d[1]
+            tr += (data[zn] - bv[2]) * vec.d[2]
             return tr
         return _cp_val
+
     registry.add_field((ftype, "cutting_plane_%s_x" % basename),
                        function=_cp_vectors('x'),
                        units=field_units)
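
Two fixes land in this hunk: the fallback bulk vector is now a yt array
carrying the same units as the component fields, so the subtraction stays
dimensionally consistent, and the plane normal's raw values (vec.d) are used
so the result keeps the field's units. The projection itself, as a
plain-numpy sketch with units elided:

    import numpy as np

    def cutting_plane_component(vx, vy, vz, normal, bulk=None):
        # subtract a bulk motion, then project onto the plane vector
        if bulk is None:
            bulk = np.zeros(3)   # stands in for data.ds.arr(..., field units)
        return ((vx - bulk[0]) * normal[0] +
                (vy - bulk[1]) * normal[1] +
                (vz - bulk[2]) * normal[2])

    v = np.ones(4)
    print(cutting_plane_component(v, v, v, normal=np.array([0.0, 0.0, 1.0])))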

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -16,7 +16,6 @@
 from yt.fields.derived_field import ValidateSpatial
 from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _astropy
-from yt.visualization._mpl_imports import FigureCanvasAgg
 from yt.units.yt_array import YTQuantity, YTArray
 from yt.utilities.fits_image import FITSImageData
 if PY3:
@@ -258,6 +257,7 @@
         self.pw.save(name=name, mpl_kwargs=mpl_kwargs)
 
     def _repr_html_(self):
+        from yt.visualization._mpl_imports import FigureCanvasAgg
         ret = ''
         for k, v in self.plots.items():
             canvas = FigureCanvasAgg(v)
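
Moving the FigureCanvasAgg import from module scope into _repr_html_ defers
the matplotlib backend import until a plot is actually rendered, keeping the
module cheap to import. The general pattern, as a minimal sketch (class and
names hypothetical):

    class PlotContainer:
        def __init__(self, plots):
            self.plots = plots  # mapping of name -> matplotlib Figure

        def _repr_html_(self):
            # heavy/optional import deferred to first use
            from matplotlib.backends.backend_agg import FigureCanvasAgg
            ret = ''
            for k, v in self.plots.items():
                FigureCanvasAgg(v)       # attach an Agg canvas for rendering
                ret += '<p>%s</p>' % k
            return ret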

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -192,12 +192,26 @@
         self.particle_filename = particle_filename
 
         if self.particle_filename is None:
-            self._particle_handle = self._handle
+            # try to guess the particle filename
+            try:
+                self._particle_handle = HDF5FileHandler(filename.replace('plt_cnt', 'part'))
+                self.particle_filename = filename.replace('plt_cnt', 'part')
+                mylog.info('Particle file found: %s' % self.particle_filename.split('/')[-1])
+            except IOError:
+                self._particle_handle = self._handle
         else:
+            # particle_filename is specified by user
             try:
                 self._particle_handle = HDF5FileHandler(self.particle_filename)
             except:
                 raise IOError(self.particle_filename)
+        # Check if the particle file has the same time
+        if self._particle_handle != self._handle:
+            part_time = self._particle_handle.handle.get('real scalars')[0][1]
+            plot_time = self._handle.handle.get('real scalars')[0][1]
+            if not np.isclose(part_time, plot_time):
+                raise IOError('%s and %s are not at the same time.' % (self.particle_filename, filename))
+
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
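
The FLASH loader now guesses the particle file from the conventional
plt_cnt -> part substitution and refuses to pair files whose snapshot times
disagree. A sketch of both pieces (file names hypothetical):

    import numpy as np

    def guess_particle_filename(plotfile):
        # FLASH convention: plotfiles contain 'plt_cnt', particle files 'part'
        candidate = plotfile.replace('plt_cnt', 'part')
        return candidate if candidate != plotfile else None

    print(guess_particle_filename("sedov_hdf5_plt_cnt_0003"))
    # -> sedov_hdf5_part_0003

    # times are compared with a float tolerance, not exact equality
    print(np.isclose(1.0 + 1e-12, 1.0))  # -> True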

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/http_stream/data_structures.py
--- a/yt/frontends/http_stream/data_structures.py
+++ b/yt/frontends/http_stream/data_structures.py
@@ -15,6 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import json
 import numpy as np
 import time
 
@@ -24,15 +25,11 @@
     ParticleDataset
 from yt.frontends.sph.fields import \
     SPHFieldInfo
+from yt.funcs import \
+    get_requests
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 
-try:
-    import requests
-    import json
-except ImportError:
-    requests = None
-
 class HTTPParticleFile(ParticleFile):
     pass
 
@@ -49,8 +46,9 @@
                  dataset_type = "http_particle_stream",
                  n_ref = 64, over_refine_factor=1, 
                  unit_system="cgs"):
-        if requests is None:
-            raise RuntimeError
+        if get_requests() is None:
+            raise ImportError(
+                "This functionality depends on the requests package")
         self.base_url = base_url
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
@@ -66,6 +64,7 @@
         self.parameters["HydroMethod"] = "sph"
 
         # Here's where we're going to grab the JSON index file
+        requests = get_requests()
         hreq = requests.get(self.base_url + "/yt_index.json")
         if hreq.status_code != 200:
             raise RuntimeError
@@ -108,6 +107,9 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].startswith("http://"):
             return False
+        requests = get_requests()
+        if requests is None:
+            return False
         hreq = requests.get(args[0] + "/yt_index.json")
         if hreq.status_code == 200:
             return True
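
The module-scope try/except import is replaced by the get_requests() helper
(added to yt/funcs.py later in this diff), so a missing requests package
turns into a clean ImportError, or a False from _is_valid, rather than a
NameError. A self-contained sketch of the pattern (is_valid_url is a
hypothetical stand-in for the _is_valid classmethod):

    def get_requests():
        # optional-dependency import: None signals "not installed"
        try:
            import requests
        except ImportError:
            requests = None
        return requests

    def is_valid_url(url):
        if not url.startswith("http://"):
            return False
        requests = get_requests()
        if requests is None:
            return False              # cannot validate without requests
        return requests.get(url + "/yt_index.json").status_code == 200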

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/http_stream/io.py
--- a/yt/frontends/http_stream/io.py
+++ b/yt/frontends/http_stream/io.py
@@ -18,24 +18,21 @@
 import numpy as np
 
 from yt.funcs import \
+    get_requests, \
     mylog
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 
-try:
-    import requests
-except ImportError:
-    requests = None
-
 class IOHandlerHTTPStream(BaseIOHandler):
     _dataset_type = "http_particle_stream"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     def __init__(self, ds):
-        if requests is None:
-            raise RuntimeError
+        if get_requests() is None:
+            raise ImportError(
+                "This functionality depends on the requests package")
         self._url = ds.base_url
         # This should eventually manage the IO and cache it
         self.total_bytes = 0
@@ -47,6 +44,7 @@
         s = "%s/%s/%s/%s" % (self._url,
             data_file.file_id, ftype, fname)
         mylog.info("Loading URL %s", s)
+        requests = get_requests()
         resp = requests.get(s)
         if resp.status_code != 200:
             raise RuntimeError

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/owls/fields.py
--- a/yt/frontends/owls/fields.py
+++ b/yt/frontends/owls/fields.py
@@ -222,41 +222,40 @@
         """ returns a function that calculates the ion density of a particle. 
         """ 
 
-        def _ion_density(field, data):
+        def get_owls_ion_density_field(ion, ftype, itab):
+            def _func(field, data):
 
-            # get element symbol from ion string. ion string will 
-            # be a member of the tuple _ions (i.e. si13)
-            #--------------------------------------------------------
-            if ion[0:2].isalpha():
-                symbol = ion[0:2].capitalize()
-            else:
-                symbol = ion[0:1].capitalize()
+                # get element symbol from ion string. ion string will 
+                # be a member of the tuple _ions (i.e. si13)
+                #--------------------------------------------------------
+                if ion[0:2].isalpha():
+                    symbol = ion[0:2].capitalize()
+                else:
+                    symbol = ion[0:1].capitalize()
 
-            # mass fraction for the element
-            #--------------------------------------------------------
-            m_frac = data[ftype, symbol+"_fraction"]
+                # mass fraction for the element
+                #--------------------------------------------------------
+                m_frac = data[ftype, symbol+"_fraction"]
 
-            # get nH and T for lookup
-            #--------------------------------------------------------
-            log_nH = np.log10( data["PartType0", "H_number_density"] )
-            log_T = np.log10( data["PartType0", "Temperature"] )
+                # get nH and T for lookup
+                #--------------------------------------------------------
+                log_nH = np.log10( data["PartType0", "H_number_density"] )
+                log_T = np.log10( data["PartType0", "Temperature"] )
 
-            # get name of owls_ion_file for given ion
-            #--------------------------------------------------------
-            owls_ion_path = self._get_owls_ion_data_dir()
-            fname = os.path.join( owls_ion_path, ion+".hdf5" )
+                # get name of owls_ion_file for given ion
+                #--------------------------------------------------------
+                itab.set_iz( data.ds.current_redshift )
 
-            # create ionization table for this redshift
-            #--------------------------------------------------------
-            itab = oit.IonTableOWLS( fname )
-            itab.set_iz( data.ds.current_redshift )
-
-            # find ion balance using log nH and log T
-            #--------------------------------------------------------
-            i_frac = itab.interp( log_nH, log_T )
-            return data[ftype,"Density"] * m_frac * i_frac 
-        
-        return _ion_density
+                # find ion balance using log nH and log T
+                #--------------------------------------------------------
+                i_frac = itab.interp( log_nH, log_T )
+                return data[ftype,"Density"] * m_frac * i_frac 
+            return _func
+            
+        ion_path = self._get_owls_ion_data_dir()
+        fname = os.path.join( ion_path, ion+".hdf5" )
+        itab = oit.IonTableOWLS( fname )
+        return get_owls_ion_density_field(ion, ftype, itab)
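
The refactor hoists the expensive setup (locating the ion data directory,
opening the HDF5 table, constructing IonTableOWLS) out of the field function
and into a factory that runs once per ion; only the cheap redshift update and
interpolation remain per call. The general closure-factory pattern, as a toy
sketch:

    def load_table(name):
        # stand-in for IonTableOWLS(fname): pretend this is expensive
        print("loading table for", name)
        return {"scale": 2.0}

    def make_field_function(name):
        table = load_table(name)          # runs once, at registration time
        def field_function(data):
            return data * table["scale"]  # runs on every field evaluation
        return field_function

    f = make_field_function("si13")
    print(f(3.0), f(4.0))  # the table is loaded only once for both calls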
 
 
 

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/owls/owls_ion_tables.py
--- a/yt/frontends/owls/owls_ion_tables.py
+++ b/yt/frontends/owls/owls_ion_tables.py
@@ -1,8 +1,8 @@
-""" 
+"""
 OWLS ion tables
 
 A module to handle the HM01 UV background spectra and ionization data from the
-OWLS photoionization equilibrium lookup tables. 
+OWLS photoionization equilibrium lookup tables.
 
 
 
@@ -17,27 +17,28 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.on_demand_imports import _h5py as h5py
+import yt.extern.six as six
 import numpy as np
 
 
 
 
-def h5rd( fname, path, dtype=None ):
+def h5rd(fname, path, dtype=None):
     """ Read Data. Return a dataset located at <path> in file <fname> as
-    a numpy array. 
+    a numpy array.
     e.g. rd( fname, '/PartType0/Coordinates' ). """
 
     data = None
-    with h5py.File( fname, 'r' ) as h5f:
-        ds = h5f[path]
-        if dtype is None:
-            dtype = ds.dtype
-        data = np.zeros( ds.shape, dtype=dtype )
-        data = ds.value
+    fid = h5py.h5f.open(six.b(fname), h5py.h5f.ACC_RDONLY)
+    dg = h5py.h5d.open(fid, path.encode('ascii'))
+    if dtype is None:
+       dtype = dg.dtype
+    data = np.zeros(dg.shape, dtype=dtype)
+    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
+    fid.close()
     return data
 
 
-
 class IonTableSpectrum:
 
     """ A class to handle the HM01 spectra in the OWLS ionization tables. """
@@ -45,17 +46,16 @@
     def __init__(self, ion_file):
 
         where = '/header/spectrum/gammahi'
-        self.GH1 = h5rd( ion_file, where ) # GH1[1/s]
+        self.GH1 = h5rd(ion_file, where) # GH1[1/s]
 
         where = '/header/spectrum/logenergy_ryd'
-        self.logryd = h5rd( ion_file, where ) # E[ryd]  
+        self.logryd = h5rd(ion_file, where) # E[ryd]
 
         where = '/header/spectrum/logflux'
-        self.logflux = h5rd( ion_file, where ) # J[ergs/s/Hz/Sr/cm^2] 
+        self.logflux = h5rd(ion_file, where) # J[ergs/s/Hz/Sr/cm^2]
 
         where = '/header/spectrum/redshift'
-        self.z = h5rd( ion_file, where ) # z
-
+        self.z = h5rd(ion_file, where) # z
 
 
     def return_table_GH1_at_z(self,z):
@@ -68,9 +68,9 @@
         else:
             i_zhi = i_zlo
             i_zlo = i_zlo - 1
-    
+
         z_frac = (z - self.z[i_zlo]) / (self.z[i_zhi] - self.z[i_zlo])
-   
+
         # find GH1 from table
         #-----------------------------------------------------------------
         logGH1_all = np.log10( self.GH1 )
@@ -80,8 +80,6 @@
         GH1_table = 10.0**logGH1_table
 
         return GH1_table
-    
-
 
 
 class IonTableOWLS:
@@ -90,7 +88,7 @@
 
     DELTA_nH = 0.25
     DELTA_T = 0.1
-    
+
     def __init__(self, ion_file):
 
         self.ion_file = ion_file
@@ -104,13 +102,13 @@
 
         # read the ionization fractions
         # linear values stored in file so take log here
-        # ionbal is the ionization balance (i.e. fraction) 
+        # ionbal is the ionization balance (i.e. fraction)
         #---------------------------------------------------------------
-        self.ionbal = h5rd( ion_file, '/ionbal' ).astype(np.float64)    
+        self.ionbal = h5rd( ion_file, '/ionbal' ).astype(np.float64)
         self.ionbal_orig = self.ionbal.copy()
 
-        ipositive = np.where( self.ionbal > 0.0 )
-        izero = np.where( self.ionbal <= 0.0 )
+        ipositive = self.ionbal > 0.0
+        izero = np.logical_not(ipositive)
         self.ionbal[izero] = self.ionbal[ipositive].min()
 
         self.ionbal = np.log10( self.ionbal )
@@ -118,7 +116,7 @@
 
         # load in background spectrum
         #---------------------------------------------------------------
-        self.spectrum = IonTableSpectrum( ion_file ) 
+        self.spectrum = IonTableSpectrum( ion_file )
 
         # calculate the spacing along each dimension
         #---------------------------------------------------------------
@@ -129,9 +127,6 @@
         self.order_str = '[log nH, log T, z]'
 
 
-            
-        
-                                                
     # sets iz and fz
     #-----------------------------------------------------
     def set_iz( self, z ):
@@ -149,11 +144,11 @@
                     self.fz = ( z - self.z[iz] ) / self.dz[iz]
                     break
 
-        
+
 
     # interpolate the table at a fixed redshift for the input
-    # values of nH and T ( input should be log ).  A simple    
-    # tri-linear interpolation is used.  
+    # values of nH and T ( input should be log ).  A simple
+    # tri-linear interpolation is used.
     #-----------------------------------------------------
     def interp( self, nH, T ):
 
@@ -162,7 +157,7 @@
 
         if nH.size != T.size:
             raise ValueError(' owls_ion_tables: array size mismatch !!! ')
-        
+
         # field discovery will have nH.size == 1 and T.size == 1
         # in that case we simply return 1.0
 
@@ -185,14 +180,14 @@
         x_T_clip = np.clip( x_T, 0.0, self.T.size-1.001 )
         fT,iT = np.modf( x_T_clip )
         iT = iT.astype( np.int32 )
-        
+
 
         # short names for previously calculated iz and fz
         #-----------------------------------------------------
         iz = self.iz
         fz = self.fz
 
-                   
+
         # calculate interpolated value
         # use tri-linear interpolation on the log values
         #-----------------------------------------------------
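
Besides whitespace cleanup, two substantive changes here: h5rd now reads
through h5py's low-level API, and the positive/zero split of the ionbal table
uses boolean masks rather than np.where index tuples. The mask-based floor
before the log, in isolation:

    import numpy as np

    ionbal = np.array([0.5, 0.0, 1e-3])
    ipositive = ionbal > 0.0
    izero = np.logical_not(ipositive)
    # clamp non-positive entries to the smallest positive value so that
    # the log10 below is well defined
    ionbal[izero] = ionbal[ipositive].min()
    print(np.log10(ionbal))   # log10 of [0.5, 0.001, 0.001]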

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -27,6 +27,8 @@
     ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
+from yt.funcs import \
+    get_requests
 from .fields import \
     SDFFieldInfo
 from yt.utilities.sdf import \
@@ -34,11 +36,6 @@
     SDFIndex,\
     HTTPSDFRead
 
-try:
-    import requests
-except ImportError:
-    requests = None
-
 @contextlib.contextmanager
 def safeopen(*args, **kwargs):
     if sys.version[0] != '3':
@@ -195,7 +192,9 @@
     def _is_valid(cls, *args, **kwargs):
         sdf_header = kwargs.get('sdf_header', args[0])
         if sdf_header.startswith("http"):
-            if requests is None: return False
+            requests = get_requests()
+            if requests is None: 
+                return False
             hreq = requests.get(sdf_header, stream=True)
             if hreq.status_code != 200: return False
             # Grab a whole 4k page.

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -953,3 +953,10 @@
         raise RuntimeError(
             "Please install palettable to use colorbrewer colormaps")
     return bmap.get_mpl_colormap(N=cmap[2])
+
+def get_requests():
+    try:
+        import requests
+    except ImportError:
+        requests = None
+    return requests

diff -r 8a110e6a0156e23c47f7c9d5d71d9d402dd65cf3 -r e551ce7068575e92b30888d53ced01619343c5cd yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -317,6 +317,41 @@
         oinfo.level = level
         return cur
 
+    def locate_positions(self, np.float64_t[:,:] positions):
+        """
+        This routine, meant to be called by other internal routines, returns a
+        list of oct IDs and a dictionary of Oct info for all the positions
+        supplied.  Positions must be in code_length.
+        """
+        cdef np.float64_t factor = (1 << self.oref)
+        cdef dict all_octs = {}
+        cdef OctInfo oi
+        cdef Oct* o = NULL
+        cdef np.float64_t pos[3]
+        cdef np.ndarray[np.uint8_t, ndim=1] recorded
+        cdef np.ndarray[np.int64_t, ndim=1] oct_id
+        oct_id = np.ones(positions.shape[0], dtype="int64") * -1
+        recorded = np.zeros(self.nocts, dtype="uint8")
+        cdef np.int64_t i, j, k
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i,j]
+            o = self.get(pos, &oi)
+            if o == NULL:
+                raise RuntimeError
+            if recorded[o.domain_ind] == 0:
+                left_edge = np.asarray(<np.float64_t[:3]>oi.left_edge).copy()
+                dds = np.asarray(<np.float64_t[:3]>oi.dds).copy()
+                right_edge = left_edge + dds*factor
+                all_octs[o.domain_ind] = dict(
+                    left_edge = left_edge,
+                    right_edge = right_edge,
+                    level = oi.level
+                )
+                recorded[o.domain_ind] = 1
+            oct_id[i] = o.domain_ind
+        return oct_id, all_octs
+
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.num_domains, dtype="uint8")
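
locate_positions walks each position down to its oct and records the oct's
edges and level only on first contact, using a byte mask indexed by
domain_ind, so the metadata dictionary stays deduplicated no matter how many
positions share an oct. The bookkeeping in pure Python (the container lookup
is a hypothetical stand-in for self.get):

    import numpy as np

    def locate(positions, get_container_id, n_containers):
        oct_id = np.full(len(positions), -1, dtype="int64")
        recorded = np.zeros(n_containers, dtype="uint8")
        info = {}
        for i, pos in enumerate(positions):
            cid = get_container_id(pos)
            if recorded[cid] == 0:
                info[cid] = {"first_hit": pos}  # stand-in for edges/level
                recorded[cid] = 1
            oct_id[i] = cid
        return oct_id, info

    ids, info = locate([0.1, 0.2, 0.9], lambda p: int(p * 2), 2)
    print(ids, sorted(info))   # [0 0 1] [0, 1]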

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/f59ea84b302c/
Changeset:   f59ea84b302c
Branch:      yt
User:        hyschive
Date:        2016-05-09 01:19:05+00:00
Summary:     Put GAMER **after** Gadget in doc/source/examining/loading_data.rst
Affected #:  1 file

diff -r e551ce7068575e92b30888d53ced01619343c5cd -r f59ea84b302cb1782464fcb72591dd4f818e4511 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -806,34 +806,6 @@
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
   other :ref:`unit systems <unit_systems>` is also possible.
 
-GAMER Data
-----------
-
-GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("InteractingJets/jet_000002")
-
-Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
-you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
-
-.. code-block:: python
-
-   import yt
-   code_units = { "length_unit":(1.0,"kpc"),
-                  "time_unit"  :(3.08567758096e+13,"s"),
-                  "mass_unit"  :(1.4690033e+36,"g") }
-   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
-
-This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
-e.g., ``("gamer","Dens")``, will be in code units.
-
-.. rubric:: Caveats
-
-* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
-
 .. _loading-gadget-data:
 
 Gadget Data
@@ -1049,6 +1021,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data


https://bitbucket.org/yt_analysis/yt/commits/01c911476af3/
Changeset:   01c911476af3
Branch:      yt
User:        ngoldbaum
Date:        2016-05-11 18:32:02+00:00
Summary:     Merged in hyschive/yt-hyschive (pull request #2150)

GAMER frontend
Affected #:  14 files

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1021,6 +1021,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,6 +34,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,6 +20,9 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gamer_000:
+    - yt/frontends/gamer/tests/test_outputs.py
+
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'flash',
     'gadget',
     'gadget_fof',
+    'gamer',
     'gdf',
     'halo_catalog',
     'http_stream',

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/__init__.py
--- /dev/null
+++ b/yt/frontends/gamer/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.gamer
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/api.py
--- /dev/null
+++ b/yt/frontends/gamer/api.py
@@ -0,0 +1,28 @@
+"""
+API for yt.frontends.gamer
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      GAMERGrid, \
+      GAMERHierarchy, \
+      GAMERDataset
+
+from .fields import \
+      GAMERFieldInfo
+
+from .io import \
+      IOHandlerGAMER
+
+### NOT SUPPORTED YET
+#from . import tests

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/data_structures.py
--- /dev/null
+++ b/yt/frontends/gamer/data_structures.py
@@ -0,0 +1,277 @@
+"""
+GAMER-specific data structures
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import stat
+import numpy as np
+import weakref
+
+from yt.funcs import mylog
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.data_objects.static_output import \
+    Dataset
+from yt.utilities.file_handler import \
+    HDF5FileHandler
+from .fields import GAMERFieldInfo
+from yt.testing import assert_equal
+
+
+
+class GAMERGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, id, index, level):
+        AMRGridPatch.__init__(self, id,
+                              filename = index.index_filename,
+                              index    = index)
+        self.Parent   = None    # do NOT initialize Parent as []
+        self.Children = []
+        self.Level    = level
+
+    def __repr__(self):
+        return 'GAMERGrid_%09i (dimension = %s)' % (self.id, self.ActiveDimensions)
+
+
+class GAMERHierarchy(GridIndex):
+    grid                 = GAMERGrid
+    _preload_implemented = True # since gamer defines "_read_chunk_data" in io.py
+    
+    def __init__(self, ds, dataset_type = 'gamer'):
+        self.dataset_type     = dataset_type
+        self.dataset          = weakref.proxy(ds)
+        self.index_filename   = self.dataset.parameter_filename
+        self.directory        = os.path.dirname(self.index_filename)
+        self._handle          = ds._handle
+        self.float_type       = 'float64' # fixed even when FLOAT8 is off
+        self._particle_handle = ds._particle_handle
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _detect_output_fields(self):
+        # find all field names in the current dataset
+        self.field_list = [ ('gamer', v) for v in self._handle['Data'].keys() ]
+    
+    def _count_grids(self):
+        # count the total number of patches at all levels  
+        self.num_grids = self.dataset.parameters['NPatch'].sum()
+        
+    def _parse_index(self):
+        parameters       = self.dataset.parameters
+        gid0             = 0
+        grid_corner      = self._handle['Tree/Corner'].value
+        convert2physical = self._handle['Tree/Corner'].attrs['Cvt2Phy']
+
+        self.grid_dimensions    [:] = parameters['PatchSize']
+        self.grid_particle_count[:] = 0
+
+        for lv in range(0, parameters['NLevel']):
+            num_grids_level = parameters['NPatch'][lv]
+            if num_grids_level == 0: break
+
+            patch_scale = parameters['PatchSize']*parameters['CellScale'][lv]
+
+            # set the level and edge of each grid
+            # (left/right_edge are YT arrays in code units)
+            self.grid_levels.flat[ gid0:gid0 + num_grids_level ] = lv
+            self.grid_left_edge[ gid0:gid0 + num_grids_level ] \
+                = grid_corner[ gid0:gid0 + num_grids_level ]*convert2physical
+            self.grid_right_edge[ gid0:gid0 + num_grids_level ] \
+                = (grid_corner[ gid0:gid0 + num_grids_level ] + patch_scale)*convert2physical
+
+            gid0 += num_grids_level
+
+        # allocate all grid objects
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in range(self.num_grids):
+            self.grids[i] = self.grid(i, self, self.grid_levels.flat[i])
+
+        # maximum level with patches (which can be lower than MAX_LEVEL)
+        self.max_level = self.grid_levels.max()
+        
+    def _populate_grid_objects(self):
+        son_list = self._handle["Tree/Son"].value
+
+        for gid in range(self.num_grids):
+            grid     = self.grids.flat[gid]
+            son_gid0 = son_list[gid]
+
+            # set up the parent-children relationship
+            if son_gid0 >= 0:
+                grid.Children = [ self.grids.flat[son_gid0+s] for s in range(8) ]
+
+            for son_grid in grid.Children: son_grid.Parent = grid
+
+            # set up other grid attributes
+            grid._prepare_grid()
+            grid._setup_dx()
+
+        # validate the parent-children relationship in the debug mode
+        if self.dataset._debug:
+            self._validate_parent_children_relationship()
+
+    # for _debug mode only
+    def _validate_parent_children_relationship(self):
+        mylog.info('Validating the parent-children relationship ...')
+
+        father_list = self._handle["Tree/Father"].value
+
+        for grid in self.grids:
+            # parent->children == itself
+            if grid.Parent is not None:
+                assert grid.Parent.Children[0+grid.id%8] is grid, \
+                       'Grid %d, Parent %d, Parent->Children %d' % \
+                       (grid.id, grid.Parent.id, grid.Parent.Children[0].id)
+
+            # children->parent == itself
+            for c in grid.Children:
+                assert c.Parent is grid, \
+                       'Grid %d, Children %d, Children->Parent %d' % \
+                       (grid.id, c.id, c.Parent.id)
+
+            # all refinement grids should have a parent
+            if grid.Level > 0:
+                assert grid.Parent is not None and grid.Parent.id >= 0, \
+                       'Grid %d, Level %d, Parent %d' % \
+                       (grid.id, grid.Level, \
+                        grid.Parent.id if grid.Parent is not None else -999)
+
+            # parent index is consistent with the loaded dataset
+            if grid.Level > 0:
+                father_gid = father_list[grid.id]
+                assert father_gid == grid.Parent.id, \
+                       'Grid %d, Level %d, Parent_Found %d, Parent_Expect %d'%\
+                       (grid.id, grid.Level, grid.Parent.id, father_gid)
+
+            # edges between children and parent
+            if len(grid.Children) > 0:
+                assert_equal(grid.LeftEdge,  grid.Children[0].LeftEdge )
+                assert_equal(grid.RightEdge, grid.Children[7].RightEdge)
+        mylog.info('Check passed')
+               
+
+class GAMERDataset(Dataset):
+    _index_class      = GAMERHierarchy
+    _field_info_class = GAMERFieldInfo
+    _handle           = None
+    _debug            = False # debug mode for the GAMER frontend
+    
+    def __init__(self, filename,
+                 dataset_type      = 'gamer',
+                 storage_filename  = None,
+                 particle_filename = None, 
+                 units_override    = None,
+                 unit_system       = "cgs"):
+
+        if self._handle is not None: return
+
+        self.fluid_types      += ('gamer',)
+        self._handle           = HDF5FileHandler(filename)
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None:
+            self._particle_handle = self._handle
+        else:
+            try:
+                self._particle_handle = HDF5FileHandler(self.particle_filename)
+            except:
+                raise IOError(self.particle_filename)
+
+        # currently GAMER only supports refinement by a factor of 2
+        self.refine_by = 2
+
+        Dataset.__init__(self, filename, dataset_type,
+                         units_override = units_override,
+                         unit_system    = unit_system)
+        self.storage_filename = storage_filename
+        
+    def _set_code_unit_attributes(self):
+        # GAMER does not assume any unit yet ...
+        if len(self.units_override) == 0:
+            mylog.warning("GAMER does not assume any unit ==> " +
+                          "Use units_override to specify the units")
+
+        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
+            setattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
+
+            if len(self.units_override) == 0:
+                mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
+        
+    def _parse_parameter_file(self):
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        # shortcuts for different simulation information
+        KeyInfo   = self._handle['Info']['KeyInfo']
+        InputPara = self._handle['Info']['InputPara']
+        Makefile  = self._handle['Info']['Makefile']
+        SymConst  = self._handle['Info']['SymConst']
+
+        # simulation time and domain
+        self.current_time      = KeyInfo['Time'][0]
+        self.dimensionality    = 3  # always 3D
+        self.domain_left_edge  = np.array([0.,0.,0.], dtype='float64')
+        self.domain_right_edge = KeyInfo['BoxSize'].astype('float64')
+        self.domain_dimensions = KeyInfo['NX0'].astype('int64')
+
+        # periodicity
+        periodic         = InputPara['Opt__BC_Flu'][0] == 0
+        self.periodicity = (periodic,periodic,periodic)
+
+        # cosmological parameters
+        if Makefile['Comoving']:
+            self.cosmological_simulation = 1
+            self.current_redshift        = 1.0/self.current_time - 1.0
+            self.omega_matter            = InputPara['OmegaM0'] 
+            self.omega_lambda            = 1.0 - self.omega_matter
+            self.hubble_constant         = 0.6955   # H0 is not set in GAMER
+        else:
+            self.cosmological_simulation = 0
+            self.current_redshift        = 0.0
+            self.omega_matter            = 0.0
+            self.omega_lambda            = 0.0
+            self.hubble_constant         = 0.0
+
+        # code-specific parameters
+        for t in KeyInfo, InputPara, Makefile, SymConst:
+            for v in t.dtype.names: self.parameters[v] = t[v]
+
+        # reset 'Model' to be more readable
+        if KeyInfo['Model'] == 1:
+            self.parameters['Model'] = 'Hydro'
+        elif KeyInfo['Model'] == 2:
+            self.parameters['Model'] = 'MHD'
+        elif KeyInfo['Model'] == 3:
+            self.parameters['Model'] = 'ELBDM'
+        else:
+            self.parameters['Model'] = 'Unknown'
+
+        # make aliases to some frequently used variables
+        if self.parameters['Model'] == 'Hydro' or \
+           self.parameters['Model'] == 'MHD':
+            self.gamma = self.parameters["Gamma"]
+            self.mu    = self.parameters.get("mu",0.6) # mean molecular weight
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            # define a unique way to identify GAMER datasets
+            f = HDF5FileHandler(args[0])
+            if 'Info' in f['/'].keys() and 'KeyInfo' in f['/Info'].keys():
+                return True
+        except:
+            pass
+        return False
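
The hierarchy ships with an opt-in consistency check: flipping the
class-level _debug flag makes _populate_grid_objects run the full
parent-children validation. A hedged usage sketch, assuming the
InteractingJets sample from the docs is on disk:

    import yt
    from yt.frontends.gamer.data_structures import GAMERDataset

    GAMERDataset._debug = True          # enable the validation pass
    ds = yt.load("InteractingJets/jet_000002")
    ds.index                            # builds grids and runs the checks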

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/fields.py
--- /dev/null
+++ b/yt/frontends/gamer/fields.py
@@ -0,0 +1,110 @@
+"""
+GAMER-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.fields.field_info_container import FieldInfoContainer
+from yt.utilities.physical_constants import mh, boltzmann_constant_cgs
+
+b_units   = "code_magnetic"
+pre_units = "code_mass / (code_length*code_time**2)"
+erg_units = "code_mass / (code_length*code_time**2)"
+rho_units = "code_mass / code_length**3"
+mom_units = "code_mass / (code_length**2*code_time)"
+vel_units = "code_velocity"
+pot_units = "code_length**2/code_time**2"
+
+psi_units = "code_mass**(1/2) / code_length**(3/2)"
+
+
+class GAMERFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # hydro fields on disk (GAMER outputs conservative variables)
+        ( "Dens", (rho_units, ["density"],                 r"\rho") ),
+        ( "MomX", (mom_units, ["momentum_x"],              None   ) ),
+        ( "MomY", (mom_units, ["momentum_y"],              None   ) ),
+        ( "MomZ", (mom_units, ["momentum_z"],              None   ) ),
+        ( "Engy", (erg_units, ["total_energy_per_volume"], None   ) ),
+        ( "Pote", (pot_units, ["gravitational_potential"], None   ) ),
+
+        # psiDM fields on disk
+        ( "Real", (psi_units, ["psidm_real_part"],         None   ) ),
+        ( "Imag", (psi_units, ["psidm_imaginary_part"],    None   ) ),
+    )
+
+    known_particle_fields = (
+    )
+
+    def __init__(self, ds, field_list):
+        super(GAMERFieldInfo, self).__init__(ds, field_list)
+
+    # add primitive and other derived variables
+    def setup_fluid_fields(self):
+        unit_system = self.ds.unit_system
+
+        # velocity
+        def velocity_xyz(v):
+            def _velocity(field, data):
+                return data["gas", "momentum_%s"%v] / data["gas","density"]
+            return _velocity
+        for v in "xyz":
+            self.add_field( ("gas","velocity_%s"%v), function = velocity_xyz(v),
+                            units = unit_system["velocity"] )
+
+        # ============================================================================
+        # note that yt internal fields assume
+        #    [thermal_energy]          = [energy per mass]
+        #    [kinetic_energy]          = [energy per volume]
+        # and we further adopt
+        #    [total_energy]            = [energy per mass]
+        #    [total_energy_per_volume] = [energy per volume]
+        # ============================================================================
+
+        # kinetic energy per volume
+        def ek(data):
+            return 0.5*( data["gamer","MomX"]**2 +
+                         data["gamer","MomY"]**2 +
+                         data["gamer","MomZ"]**2 ) / data["gamer","Dens"]
+
+        # thermal energy per volume
+        def et(data):
+            return data["gamer","Engy"] - ek(data)
+
+        # thermal energy per mass (i.e., specific)
+        def _thermal_energy(field, data):
+            return et(data) / data["gamer","Dens"]
+        self.add_field( ("gas","thermal_energy"), function = _thermal_energy,
+                        units = unit_system["specific_energy"] )
+
+        # total energy per mass
+        def _total_energy(field, data):
+            return data["gamer","Engy"] / data["gamer","Dens"]
+        self.add_field( ("gas","total_energy"), function = _total_energy,
+                        units = unit_system["specific_energy"] )
+
+        # pressure
+        def _pressure(field, data):
+            return et(data)*(data.ds.gamma-1.0)
+        self.add_field( ("gas","pressure"), function = _pressure,
+                        units = unit_system["pressure"] )
+
+        # temperature
+        def _temperature(field, data):
+            return data.ds.mu*mh*data["gas","pressure"] / \
+                   (data["gas","density"]*boltzmann_constant_cgs)
+        self.add_field( ("gas","temperature"), function = _temperature,
+                        units = unit_system["temperature"] )
+
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass
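
With these definitions, yt derives the primitive fields on the fly from
GAMER's conservative outputs (Dens, MomX/Y/Z, Engy). Usage, reusing the
units_override from the docs above and assuming the same sample dataset:

    import yt

    code_units = {"length_unit": (1.0, "kpc"),
                  "time_unit":   (3.08567758096e+13, "s"),
                  "mass_unit":   (1.4690033e+36, "g")}
    ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
    ad = ds.all_data()
    # temperature chains through pressure -> thermal energy -> Dens/Mom*/Engy
    print(ad["gas", "temperature"])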

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/io.py
--- /dev/null
+++ b/yt/frontends/gamer/io.py
@@ -0,0 +1,90 @@
+"""
+GAMER-specific IO functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from itertools import groupby
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+
+
+#-----------------------------------------------------------------------------
+# GAMER shares a similar HDF5 format, and thus io.py as well, with FLASH
+#-----------------------------------------------------------------------------
+
+
+# group grids with consecutive indices together to improve the I/O performance
+def grid_sequences(grids):
+    for k, g in groupby( enumerate(grids), lambda i_x1:i_x1[0]-i_x1[1].id ):
+        seq = list(v[1] for v in g)
+        yield seq
+
+class IOHandlerGAMER(BaseIOHandler):
+    _particle_reader = False
+    _dataset_type    = "gamer"
+
+    def __init__(self, ds):
+        super(IOHandlerGAMER, self).__init__(ds)
+        self._handle      = ds._handle
+        self._field_dtype = "float64" # fixed even when FLOAT8 is off
+
+    def _read_particle_coords(self, chunks, ptf):
+        pass
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks) # generator --> list
+
+        if any( (ftype != "gamer" for ftype, fname in fields) ):
+            raise NotImplementedError
+
+        rv = {}
+        for field in fields: rv[field] = np.empty( size, dtype=self._field_dtype )
+
+        ng = sum( len(c.objs) for c in chunks ) # c.objs is a list of grids
+        mylog.debug( "Reading %s cells of %s fields in %s grids",
+                     size, [f2 for f1, f2 in fields], ng )
+
+        for field in fields:
+            ds     = self._handle[ "/Data/%s" % field[1] ]
+            offset = 0
+            for chunk in chunks:
+                for gs in grid_sequences(chunk.objs):
+                    start = gs[ 0].id
+                    end   = gs[-1].id + 1
+                    data  = ds[start:end,:,:,:].transpose()
+                    for i, g in enumerate(gs):
+                        offset += g.select( selector, data[...,i], rv[field], offset )
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        rv = {}
+        if len(chunk.objs) == 0: return rv 
+
+        for g in chunk.objs: rv[g.id] = {}
+
+        for field in fields:
+            ds = self._handle[ "/Data/%s" % field[1] ]
+
+            for gs in grid_sequences(chunk.objs):
+                start = gs[ 0].id
+                end   = gs[-1].id + 1
+                data  = ds[start:end,:,:,:].transpose()
+                for i, g in enumerate(gs):
+                    rv[g.id][field] = np.asarray( data[...,i], dtype=self._field_dtype )
+        return rv
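
grid_sequences is the I/O optimization here: grids with consecutive ids are
read as one contiguous HDF5 slab (ds[start:end,:,:,:]) instead of one read
per grid. The enumerate/groupby trick on its own; items whose
(position - id) difference is constant form a consecutive run:

    from itertools import groupby
    from collections import namedtuple

    Grid = namedtuple("Grid", "id")
    grids = [Grid(i) for i in (0, 1, 2, 5, 6, 9)]

    def grid_sequences(grids):
        for _, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id):
            yield [v[1] for v in g]

    print([[g.id for g in seq] for seq in grid_sequences(grids)])
    # -> [[0, 1, 2], [5, 6], [9]]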

diff -r b61ebaeaae2e05c7e2aa6757ff23465c5cf2681d -r 01c911476af375f444f6d83cac45f5a61c9d82e4 yt/frontends/gamer/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/gamer/tests/test_outputs.py
@@ -0,0 +1,63 @@
+"""
+GAMER frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import \
+    assert_equal, \
+    requires_file, \
+    units_override_check
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    data_dir_load
+from yt.frontends.gamer.api import GAMERDataset
+
+
+
+jet         = "InteractingJets/jet_000002"
+_fields_jet = ("temperature", "density", "velocity_magnitude")
+jet_units   = {"length_unit":(1.0,"kpc"),
+               "time_unit"  :(3.08567758096e+13,"s"),
+               "mass_unit"  :(1.4690033e+36,"g")}
+
+@requires_ds(jet, big_data=True)
+def test_jet():
+    ds = data_dir_load(jet, kwargs={"units_override":jet_units})
+    yield assert_equal, str(ds), "jet_000002"
+    for test in small_patch_amr(ds, _fields_jet):
+        test_jet.__name__ = test.description
+        yield test
+
+
+psiDM         = "WaveDarkMatter/psiDM_000020"
+_fields_psiDM = ("Dens", "Real", "Imag")
+
+@requires_ds(psiDM, big_data=True)
+def test_psiDM():
+    ds = data_dir_load(psiDM)
+    yield assert_equal, str(ds), "psiDM_000020"
+    for test in small_patch_amr(ds, _fields_psiDM):
+        test_psiDM.__name__ = test.description
+        yield test
+
+
+@requires_file(psiDM)
+def test_GAMERDataset():
+    assert isinstance(data_dir_load(psiDM), GAMERDataset)
+
+
+@requires_file(jet)
+def test_units_override():
+    for test in units_override_check(jet):
+        yield test

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

