[yt-svn] commit/yt: 44 new changesets

commits-noreply at bitbucket.org
Wed Sep 7 11:44:50 PDT 2016


44 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a05016e6741e/
Changeset:   a05016e6741e
Branch:      yt
User:        brittonsmith
Date:        2014-11-10 21:29:15+00:00
Summary:     Clump info items now store results in a dictionary hanging off the clump object.
Affected #:  1 file

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r a05016e6741eb5a3ae1b92d242fe428cce440e0b yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -21,14 +21,15 @@
 clump_info_registry = OperatorRegistry()
 
 def add_clump_info(name, function):
-    clump_info_registry[name] = ClumpInfoCallback(function)
+    clump_info_registry[name] = ClumpInfoCallback(name, function)
 
 class ClumpInfoCallback(object):
     r"""
     A ClumpInfoCallback is a function that takes a clump, computes a 
     quantity, and returns a string to be printed out for writing clump info.
     """
-    def __init__(self, function, args=None, kwargs=None):
+    def __init__(self, name, function, args=None, kwargs=None):
+        self.name = name
         self.function = function
         self.args = args
         if self.args is None: self.args = []
@@ -36,43 +37,46 @@
         if self.kwargs is None: self.kwargs = {}
 
     def __call__(self, clump):
-        return self.function(clump, *self.args, **self.kwargs)
+        if self.name not in clump.info:
+            clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
+        rv = clump.info[self.name]
+        return rv[0] % rv[1]
     
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
-    return "Cells: %d." % n_cells
+    return "Cells: %d.", n_cells
 add_clump_info("total_cells", _total_cells)
 
 def _cell_mass(clump):
     cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
-    return "Mass: %e Msun." % cell_mass
+    return "Mass: %e Msun.", cell_mass
 add_clump_info("cell_mass", _cell_mass)
 
 def _mass_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
-    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (mass-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
 
 def _volume_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("index", "cell_volume")).in_units("Msun")
-    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (volume-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
 
 def _max_grid_level(clump):
     max_level = clump.data["index", "grid_level"].max()
-    return "Max grid level: %d." % max_level
+    return "Max grid level: %d.", max_level
 add_clump_info("max_grid_level", _max_grid_level)
 
 def _min_number_density(clump):
     min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
-    return "Min number density: %.6e cm^-3." % min_n
+    return "Min number density: %.6e cm^-3.", min_n
 add_clump_info("min_number_density", _min_number_density)
 
 def _max_number_density(clump):
     max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
-    return "Max number density: %.6e cm^-3." % max_n
+    return "Max number density: %.6e cm^-3.", max_n
 add_clump_info("max_number_density", _max_number_density)
 
 def _distance_to_main_clump(clump, units="pc"):
@@ -82,6 +86,7 @@
     master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
     my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e %s." % \
-      (distance.in_units(units), units)
+    distance.convert_to_units("pc")
+    return "Distance from master center of mass: %%.6e %s." % units, \
+      distance.in_units(units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)
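Editor's note: the net effect of this changeset is that info items now return a (format string, value) pair rather than a pre-formatted string; the callback caches the computed value in clump.info keyed by the item's name and rebuilds the formatted string on demand (note the %% escape in _distance_to_main_clump, which protects the numeric format through the first interpolation of units). A minimal stand-alone sketch of the pattern, with MiniClump and the literal value as illustrative stand-ins for real clump data:

    class MiniClump:
        def __init__(self):
            self.info = {}

    def _total_cells(clump):
        # in the real item this would be clump.data["index", "ones"].size
        return "Cells: %d.", 42

    clump = MiniClump()
    name = "total_cells"
    if name not in clump.info:
        clump.info[name] = _total_cells(clump)
    fmt, value = clump.info[name]
    print(fmt % value)  # -> "Cells: 42."; the raw value stays in clump.info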


https://bitbucket.org/yt_analysis/yt/commits/847b3fa54216/
Changeset:   847b3fa54216
Branch:      yt
User:        brittonsmith
Date:        2014-11-10 21:29:32+00:00
Summary:     Added hdf5 writing routines.
Affected #:  1 file

diff -r a05016e6741eb5a3ae1b92d242fe428cce440e0b -r 847b3fa54216ac6084a0436a02eb6a6cb57cdaff yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import copy
+import h5py
 import numpy as np
 import uuid
 
@@ -54,6 +55,7 @@
         self.quantities = data.quantities
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
+        self.info = {}
 
         # List containing characteristics about clumps that are to be written 
         # out by the write routines.
@@ -292,3 +294,46 @@
             write_clumps(child, 0, fh)
     if top:
         fh.close()
+
+def write_clump_index_h5(clump, level, fh):
+    print level
+    top = False
+    if not isinstance(fh, h5py.File) and \
+      not isinstance(fh, h5py.Group):
+        fh = h5py.File(fh, "w")
+        top = True
+    for item in clump.clump_info:
+        item(clump)
+        my_info = clump.info[item.name][1]
+        fh.attrs[item.name] = my_info
+        if hasattr(my_info, "units") and \
+          "dimensionless" not in str(my_info.units):
+            units = str(my_info.units)
+        else:
+            units = ""
+        fh.attrs["%s_units" % item.name] = units
+    if ((clump.children is not None) and (len(clump.children) > 0)):
+        i = 0
+        for child in clump.children:
+            my_group = fh.create_group("child_%04d" % i)
+            write_clump_index_h5(child, (level+1), my_group)
+            i += 1
+    if top:
+        fh.close()
+        
+def write_clumps_h5(clump, filename):
+    clump_list = get_lowest_clumps(clump)
+    fh = h5py.File(filename, "w")
+    for item in clump.clump_info:
+        quantity = []
+        for my_clump in clump_list:
+            item(my_clump)
+            quantity.append(my_clump.info[item.name][1])
+        quantity = clump.data.ds.arr(quantity)
+        dataset = fh.create_dataset(item.name, data=quantity)
+        if "dimensionless" in str(quantity.units):
+            units = ""
+        else:
+            units = str(quantity.units)
+        dataset.attrs["units"] = units
+    fh.close()
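Editor's note: for orientation, a hedged sketch of consuming a file written by write_clumps_h5. Each registered info item becomes an HDF5 dataset named after the item, holding one entry per leaf clump, with a "units" string attribute (empty for dimensionless quantities). The filename is a placeholder:

    import h5py

    with h5py.File("clumps.h5", "r") as fh:  # placeholder filename
        for name in fh:
            values = fh[name][:]             # one entry per leaf clump
            units = fh[name].attrs["units"]
            print(name, values, units)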


https://bitbucket.org/yt_analysis/yt/commits/37557b3530e5/
Changeset:   37557b3530e5
Branch:      yt
User:        brittonsmith
Date:        2016-04-26 13:30:01+00:00
Summary:     Merging in clump work from a while ago.
Affected #:  2 files

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 37557b3530e52069f0dccd4b6b0efa4aaa0969e3 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import copy
+import h5py
 import numpy as np
 import uuid
 
@@ -56,6 +57,7 @@
         self.quantities = data.quantities
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
+        self.info = {}
 
         # List containing characteristics about clumps that are to be written 
         # out by the write routines.
@@ -299,3 +301,46 @@
             write_clumps(child, 0, fh)
     if top:
         fh.close()
+
+def write_clump_index_h5(clump, level, fh):
+    print level
+    top = False
+    if not isinstance(fh, h5py.File) and \
+      not isinstance(fh, h5py.Group):
+        fh = h5py.File(fh, "w")
+        top = True
+    for item in clump.clump_info:
+        item(clump)
+        my_info = clump.info[item.name][1]
+        fh.attrs[item.name] = my_info
+        if hasattr(my_info, "units") and \
+          "dimensionless" not in str(my_info.units):
+            units = str(my_info.units)
+        else:
+            units = ""
+        fh.attrs["%s_units" % item.name] = units
+    if ((clump.children is not None) and (len(clump.children) > 0)):
+        i = 0
+        for child in clump.children:
+            my_group = fh.create_group("child_%04d" % i)
+            write_clump_index_h5(child, (level+1), my_group)
+            i += 1
+    if top:
+        fh.close()
+        
+def write_clumps_h5(clump, filename):
+    clump_list = get_lowest_clumps(clump)
+    fh = h5py.File(filename, "w")
+    for item in clump.clump_info:
+        quantity = []
+        for my_clump in clump_list:
+            item(my_clump)
+            quantity.append(my_clump.info[item.name][1])
+        quantity = clump.data.ds.arr(quantity)
+        dataset = fh.create_dataset(item.name, data=quantity)
+        if "dimensionless" in str(quantity.units):
+            units = ""
+        else:
+            units = str(quantity.units)
+        dataset.attrs["units"] = units
+    fh.close()

diff -r 77db965d0a537a9884d6db3c5ad7595dc3070ba0 -r 37557b3530e52069f0dccd4b6b0efa4aaa0969e3 yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -21,14 +21,15 @@
 clump_info_registry = OperatorRegistry()
 
 def add_clump_info(name, function):
-    clump_info_registry[name] = ClumpInfoCallback(function)
+    clump_info_registry[name] = ClumpInfoCallback(name, function)
 
 class ClumpInfoCallback(object):
     r"""
     A ClumpInfoCallback is a function that takes a clump, computes a 
     quantity, and returns a string to be printed out for writing clump info.
     """
-    def __init__(self, function, args=None, kwargs=None):
+    def __init__(self, name, function, args=None, kwargs=None):
+        self.name = name
         self.function = function
         self.args = args
         if self.args is None: self.args = []
@@ -36,43 +37,46 @@
         if self.kwargs is None: self.kwargs = {}
 
     def __call__(self, clump):
-        return self.function(clump, *self.args, **self.kwargs)
+        if self.name not in clump.info:
+            clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
+        rv = clump.info[self.name]
+        return rv[0] % rv[1]
     
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
-    return "Cells: %d." % n_cells
+    return "Cells: %d.", n_cells
 add_clump_info("total_cells", _total_cells)
 
 def _cell_mass(clump):
     cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
-    return "Mass: %e Msun." % cell_mass
+    return "Mass: %e Msun.", cell_mass
 add_clump_info("cell_mass", _cell_mass)
 
 def _mass_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
-    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (mass-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
 
 def _volume_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("index", "cell_volume")).in_units("Msun")
-    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (volume-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
 
 def _max_grid_level(clump):
     max_level = clump.data["index", "grid_level"].max()
-    return "Max grid level: %d." % max_level
+    return "Max grid level: %d.", max_level
 add_clump_info("max_grid_level", _max_grid_level)
 
 def _min_number_density(clump):
     min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
-    return "Min number density: %.6e cm^-3." % min_n
+    return "Min number density: %.6e cm^-3.", min_n
 add_clump_info("min_number_density", _min_number_density)
 
 def _max_number_density(clump):
     max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
-    return "Max number density: %.6e cm^-3." % max_n
+    return "Max number density: %.6e cm^-3.", max_n
 add_clump_info("max_number_density", _max_number_density)
 
 def _distance_to_main_clump(clump, units="pc"):
@@ -82,6 +86,7 @@
     master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
     my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e %s." % \
-      (distance.in_units(units), units)
+    distance.convert_to_units("pc")
+    return "Distance from master center of mass: %%.6e %s." % units, \
+      distance.in_units(units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)


https://bitbucket.org/yt_analysis/yt/commits/60433b9e68f4/
Changeset:   60433b9e68f4
Branch:      yt
User:        brittonsmith
Date:        2016-04-27 13:39:00+00:00
Summary:     Add brute force method for finding particles contained within a collection of cells so we can query particle fields from clumps.
Affected #:  3 files

diff -r 37557b3530e52069f0dccd4b6b0efa4aaa0969e3 -r 60433b9e68f47828d257bd073a7a47578f677faf yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -59,6 +59,9 @@
         self.max_val = self.data[field].max()
         self.info = {}
 
+        if parent is not None:
+            self.data.parent = self.parent.data
+
         # List containing characteristics about clumps that are to be written 
         # out by the write routines.
         if clump_info is None:

diff -r 37557b3530e52069f0dccd4b6b0efa4aaa0969e3 -r 60433b9e68f47828d257bd073a7a47578f677faf yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -24,12 +24,13 @@
     iterable, \
     validate_width_tuple, \
     fix_length
+from yt.geometry.selection_routines import \
+    points_in_cells
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
     YTSphereTooSmall, \
     YTIllDefinedCutRegion, \
-    YTMixedCutRegion, \
     YTEllipsoidOrdering
 from yt.utilities.minimal_representation import \
     MinimalSliceData
@@ -793,8 +794,10 @@
         for field in fields:
             f = self.base_object[field]
             if f.shape != ind.shape:
-                raise YTMixedCutRegion(self.conditionals, field)
-            self.field_data[field] = self.base_object[field][ind]
+                parent = getattr(self, "parent", self.base_object)
+                self.field_data[field] = parent[field][self._part_ind]
+            else:
+                self.field_data[field] = self.base_object[field][ind]
 
     @property
     def blocks(self):
@@ -809,18 +812,35 @@
             if not np.any(m): continue
             yield obj, m
 
+    _cell_mask = None
     @property
     def _cond_ind(self):
-        ind = None
-        obj = self.base_object
-        with obj._field_parameter_state(self.field_parameters):
-            for cond in self.conditionals:
-                res = eval(cond)
-                if ind is None: ind = res
-                if ind.shape != res.shape:
-                    raise YTIllDefinedCutRegion(self.conditionals)
-                np.logical_and(res, ind, ind)
-        return ind
+        if self._cell_mask is None:
+            ind = None
+            obj = self.base_object
+            with obj._field_parameter_state(self.field_parameters):
+                for cond in self.conditionals:
+                    res = eval(cond)
+                    if ind is None: ind = res
+                    if ind.shape != res.shape:
+                        raise YTIllDefinedCutRegion(self.conditionals)
+                    np.logical_and(res, ind, ind)
+            self._cell_mask = ind
+        return self._cell_mask
+
+    _particle_mask = None
+    @property
+    def _part_ind(self):
+        if self._particle_mask is None:
+            parent = getattr(self, "parent", self.base_object)
+            mask = points_in_cells(
+                self["x"], self["y"], self["z"],
+                self["dx"], self["dy"], self["dz"],
+                parent["particle_position_x"].to("code_length"),
+                parent["particle_position_y"].to("code_length"),
+                parent["particle_position_z"].to("code_length"))
+            self._particle_mask = mask
+        return self._particle_mask
 
     @property
     def icoords(self):
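Editor's note: both new properties above follow the same lazy, memoized-mask pattern: a class-level None sentinel, compute on first access, reuse thereafter. A minimal sketch of that pattern in isolation (_compute_mask is a stand-in for the conditional evaluation or the points_in_cells call):

    import numpy as np

    class MaskedRegion:
        _particle_mask = None  # class-level sentinel; instances overwrite it

        @property
        def _part_ind(self):
            if self._particle_mask is None:
                # the expensive geometric test runs only once per object
                self._particle_mask = self._compute_mask()
            return self._particle_mask

        def _compute_mask(self):
            return np.array([True, False, True])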

diff -r 37557b3530e52069f0dccd4b6b0efa4aaa0969e3 -r 60433b9e68f47828d257bd073a7a47578f677faf yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2046,3 +2046,42 @@
         return ("halo_particles", self.halo_id)
 
 halo_particles_selector = HaloParticlesSelector
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def points_in_cells(
+        np.float64_t[:] cx,
+        np.float64_t[:] cy,
+        np.float64_t[:] cz,
+        np.float64_t[:] dx,
+        np.float64_t[:] dy,
+        np.float64_t[:] dz,
+        np.float64_t[:] px,
+        np.float64_t[:] py,
+        np.float64_t[:] pz):
+    # Take a list of cells and particles and calculate which particles
+    # are enclosed within one of the cells.  This is used for querying
+    # particle fields on clump/contour objects.
+    # We use brute force since the cells are a relatively unordered collection.
+
+    cdef int p, c, n_p, n_c
+
+    n_p = px.size
+    n_c = cx.size
+    mask = np.ones(n_p, dtype="bool")
+
+    for p in range(n_p):
+        for c in range(n_c):
+            if fabs(px[p] - cx[c]) > 0.5 * dx[c]:
+                mask[p] = False
+                continue
+            if fabs(py[p] - cy[c]) > 0.5 * dy[c]:
+                mask[p] = False
+                continue
+            if fabs(pz[p] - cz[c]) > 0.5 * dz[c]:
+                mask[p] = False
+                continue
+            if mask[p]: break
+
+    return mask
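Editor's note: the same membership test, as a vectorized NumPy sketch for readers who want to check the geometry without compiling Cython. A particle lies in a cell when it is within half a cell width of the cell center along every axis. points_in_cells_numpy is a hypothetical illustration; it builds an (n_particles, n_cells) boolean table, trading the Cython loop's early exit for memory:

    import numpy as np

    def points_in_cells_numpy(cx, cy, cz, dx, dy, dz, px, py, pz):
        # (n_p, n_c) boolean tables, one per axis
        in_x = np.abs(px[:, None] - cx[None, :]) <= 0.5 * dx[None, :]
        in_y = np.abs(py[:, None] - cy[None, :]) <= 0.5 * dy[None, :]
        in_z = np.abs(pz[:, None] - cz[None, :]) <= 0.5 * dz[None, :]
        # keep a particle if some cell contains it along all three axes
        return (in_x & in_y & in_z).any(axis=1)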


https://bitbucket.org/yt_analysis/yt/commits/c00b5e9b34ea/
Changeset:   c00b5e9b34ea
Branch:      yt
User:        brittonsmith
Date:        2016-04-27 16:21:28+00:00
Summary:     Adding a means of setting clump ids.
Affected #:  1 file

diff -r 60433b9e68f47828d257bd073a7a47578f677faf -r c00b5e9b34ea400f93e11b2313dd80ee81081eee yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -50,7 +50,8 @@
 class Clump(object):
     children = None
     def __init__(self, data, field, parent=None,
-                 clump_info=None, validators=None):
+                 clump_info=None, validators=None,
+                 base=None):
         self.data = data
         self.field = field
         self.parent = parent
@@ -59,6 +60,13 @@
         self.max_val = self.data[field].max()
         self.info = {}
 
+        if base is None:
+            base = self
+            self.total_clumps = 0
+        self.base = base
+        self.clump_id = self.base.total_clumps
+        self.base.total_clumps += 1
+
         if parent is not None:
             self.data.parent = self.parent.data
 
@@ -162,7 +170,8 @@
                 continue
             self.children.append(Clump(new_clump, self.field, parent=self,
                                        clump_info=self.clump_info,
-                                       validators=self.validators))
+                                       validators=self.validators,
+                                       base=self.base))
 
     def pass_down(self,operation):
         """


https://bitbucket.org/yt_analysis/yt/commits/089ba9aa3fae/
Changeset:   089ba9aa3fae
Branch:      yt
User:        brittonsmith
Date:        2016-06-13 14:52:23+00:00
Summary:     Merging.
Affected #:  168 files

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -1,6 +1,6 @@
 stephenskory at yahoo.com = s at skory.us
 "Stephen Skory stephenskory at yahoo.com" = s at skory.us
-yuan at astro.columbia.edu = bear0980 at gmail.com
+bear0980 at gmail.com = yuan at astro.columbia.edu
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
@@ -19,7 +19,6 @@
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
 jcforbes at ucsc.edu = jforbes at ucolick.org
-ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
 fbogert = fbogert at ucsc.edu
@@ -39,4 +38,12 @@
 jnaiman at ucolick.org = jnaiman
 migueld.deval = miguel at archlinux.net
 slevy at ncsa.illinois.edu = salevy at illinois.edu
-malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file
+malzraa at gmail.com = kellerbw at mcmaster.ca
+None = convert-repo
+dfenn = df11c at my.fsu.edu
+langmm = langmm.astro at gmail.com
+jmt354 = jmtomlinson95 at gmail.com
+desika = dnarayan at haverford.edu
+Ben Thompson = bthompson2090 at gmail.com
+goldbaum at ucolick.org = ngoldbau at illinois.edu
+ngoldbau at ucsc.edu = ngoldbau at illinois.edu

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -45,9 +45,11 @@
 yt/utilities/lib/mesh_intersection.cpp
 yt/utilities/lib/mesh_samplers.cpp
 yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_triangulation.c
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/pixelization_routines.c

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -651,7 +651,7 @@
 .. _multiple-PRs:
 
 Working with Multiple BitBucket Pull Requests
-+++++++++++++++++++++++++++++++++++++++++++++
+---------------------------------------------
 
 Once you become active developing for yt, you may be working on
 various aspects of the code or bugfixes at the same time.  Currently,

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -10,6 +10,7 @@
                 Alex Bogert (fbogert at ucsc.edu)
                 André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
+                Yi-Hao Chen (yihaochentw at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
@@ -25,10 +26,12 @@
                 William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
+                David Hannasch (David.A.Hannasch at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
                 Anni Järvenpää (anni.jarvenpaa at gmail.com)
                 Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Maximilian Katz (maximilian.katz at stonybrook.edu)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
@@ -60,6 +63,7 @@
                 Anna Rosen (rosen at ucolick.org)
                 Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
+                Hsi-Yu Schive (hyschive at gmail.com)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
                 Pat Shriwise (shriwise at wisc.edu)
@@ -75,6 +79,7 @@
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
                 Robert Thompson (rthompsonj at gmail.com)
+                Joseph Tomlinson (jmtomlinson95 at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 appveyor.yml
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,38 @@
+# AppVeyor.com is a Continuous Integration service to build and run tests under
+# Windows
+
+environment:
+
+  global:
+      PYTHON: "C:\\Miniconda-x64"
+
+  matrix:
+
+      - PYTHON_VERSION: "2.7"
+
+      - PYTHON_VERSION: "3.5"
+
+
+platform:
+    -x64
+
+install:
+    - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+
+    # Install the build and runtime dependencies of the project.
+    # Create a conda environment
+    - "conda create -q --yes -n test python=%PYTHON_VERSION%"
+    - "activate test"
+
+    # Check that we have the expected version of Python
+    - "python --version"
+
+    # Install specified version of numpy and dependencies
+    - "conda install -q --yes numpy nose setuptools ipython Cython sympy h5py matplotlib"
+    - "python setup.py develop"
+
+# Not a .NET project
+build: false
+
+test_script:
+  - "nosetests -e test_all_fields ."

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -478,7 +478,8 @@
 
    import yt
    import numpy as np
-   from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
+   from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
+   from yt.units import mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
    import aplpy

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -32,7 +32,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0)
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
 
 Additional keyword arguments are:
 
@@ -49,7 +49,8 @@
 
  * **constant_metallicity** (*float*): If specified, assume a constant
    metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this.  Default: None.
+   must be set to False to use this. It should be given in units of solar metallicity.
+   Default: None.
 
 The resulting fields can be used like all normal fields. The function will return the names of
 the created fields in a Python list.
@@ -60,7 +61,7 @@
   from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field
 
-  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
+  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
 
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -14,14 +14,126 @@
 out of favor, as these discrete fields can be any type of sparsely populated
 data.
 
+What are fields?
+----------------
+
+Fields in yt are denoted by a two-element tuple, of the form ``(field_type,
+field_name)``. The first element, the "field type" is a category for a
+field. Possible field types used in yt include *gas* (for fluid mesh fields
+defined on a mesh) or *io* (for fields defined at particle locations). Field
+types can also correspond to distinct particle of fluid types in a single
+simulation. For example, a plasma physics simulation using the Particle in Cell
+method might have particle types corresponding to *electrons* and *ions*. See
+:ref:`known-field-types` below for more info about field types in yt.
+
+The second element of field tuples, the "field name", denotes the specific field
+to select, given the field type. Possible field names include *density*,
+*velocity_x* or *pressure* --- these three fields are examples of field names
+that might be used for a fluid defined on a mesh. Examples of particle fields
+include *particle_mass*, *particle_position*, or *particle_velocity_x*. In
+general, particle field names are prefixed by "particle\_", which makes it easy
+to distinguish between a particle field or a mesh field when no field type is
+provided.
+
+What fields are available?
+--------------------------
+
+We provide a full list of fields that yt recognizes by default at
+:ref:`field-list`.  If you want to create additional custom derived fields,
+see :ref:`creating-derived-fields`.
+
+Every dataset has an attribute, ``ds.fields``.  This attribute possesses
+attributes itself, each of which is a "field type," and each field type has as
+its attributes the fields themselves.  When one of these is printed, it returns
+information about the field and things like units and so on.  You can use this
+for tab-completing as well as easier access to information.
+
+As an example, you might browse the available fields like so:
+
+.. code-block:: python
+
+  print(dir(ds.fields))
+  print(dir(ds.fields.gas))
+  print(ds.fields.gas.density)
+
+On an Enzo dataset, the result from the final command would look something like
+this:::
+
+  Alias Field for "('enzo', 'Density')" (gas, density): (units: g/cm**3)
+
+You can use this to easily explore available fields, particularly through
+tab-completion in Jupyter/IPython.
+
+For a more programmatic method of accessing fields, you can utilize the
+``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
+information about fields.  The full list of fields available for a dataset can
+be found as the attribute ``field_list`` for native, on-disk fields and
+``derived_field_list`` for derived fields (``derived_field_list`` is a superset
+of ``field_list``).  You can view these lists by examining a dataset like this:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print(ds.field_list)
+   print(ds.derived_field_list)
+
+By using the ``field_info()`` class, one can access information about a given
+field, like its default units or the source code for it.
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   ds.index
+   print(ds.field_info["gas", "pressure"].get_units())
+   print(ds.field_info["gas", "pressure"].get_source())
+
+Using fields to access data
+---------------------------
+
+The primary *use* of fields in yt is to access data from a dataset. For example,
+if I want to use a data object (see :ref:`Data-objects` for more detail about
+data objects) to access the ``('gas', 'density')`` field, one can do any of the
+following:
+
+.. code-block:: python
+
+    ad = ds.all_data()
+
+    # just a field name
+    density = ad['density']
+
+    # field tuple with no parentheses
+    density = ad['gas', 'density']
+
+    # full field tuple
+    density = ad[('gas', 'density')]
+
+    # through the ds.fields object
+    density = ad[ds.fields.gas.density]
+
+The first data access example is the simplest. In that example, the field type
+is inferred from the name of the field. The next two examples use the field type
+explicitly, this might be necessary if there is more than one field type with a
+"density" field defined in the same dataset. The third example is slightly more
+verbose but is syntactically identical to the second example due to the way
+indexing works in the Python language.
+
+The final example uses the ``ds.fields`` object described above. This way of
+accessing fields lends itself to interactive use, especially if you make heavy
+use of IPython's tab completion features. Any of these ways of denoting the
+``('gas', 'density')`` field can be used when supplying a field name to a yt
+data object, analysis routines, or plotting and visualization function.
+
+Accessing Fields without a Field Type
+-------------------------------------
+
 In previous versions of yt, there was a single mechanism of accessing fields on
-a data container -- by their name, which was mandated to be a single string,
-and which often varied between different code frontends.  yt 3.0 allows
-for datasets containing multiple different types of fluid fields, mesh fields,
-particles (with overlapping or disjoint lists of fields).  To enable accessing
-these fields in a meaningful, simple way, the mechanism for accessing them has
-changed to take an optional *field type* in addition to the *field name* of
-the form ('*field type*', '*field name*').
+a data container -- by their name, which was mandated to be a single string, and
+which often varied between different code frontends.  yt 3.0 allows for datasets
+containing multiple different types of fluid fields, mesh fields, particles
+(with overlapping or disjoint lists of fields). However, to preserve backward
+compatibility and make interactive use simpler, yt will still accept field names
+given as a string and will try to infer the field type given a field name.
 
 As an example, we may be in a situation where have multiple types of particles
 which possess the ``particle_position`` field.  In the case where a data
@@ -30,9 +142,9 @@
 
 .. code-block:: python
 
-   print(ad["humans", "particle_position"])
-   print(ad["dogs", "particle_position"])
-   print(ad["dinosaurs", "particle_position"])
+   print(ad["dark_matter", "particle_position"])
+   print(ad["stars", "particle_position"])
+   print(ad["black_holes", "particle_position"])
 
 Each of these three fields may have different sizes.  In order to enable
 falling back on asking only for a field by the name, yt will use the most
@@ -45,7 +157,8 @@
 
    print(ad["particle_velocity"])
 
-it would select ``dinosaurs`` as the field type.
+it would select ``black_holes`` as the field type, since the last field accessed
+used that field type.
 
 The same operations work for fluid and mesh fields.  As an example, in some
 cosmology simulations, we may want to examine the mass of particles in a region
@@ -189,58 +302,6 @@
  * Species fields, such as for chemistry species (yt can recognize the entire
    periodic table in field names and construct ionization fields as need be)
 
-What fields are available?
---------------------------
-
-We provide a full list of fields that yt recognizes by default at
-:ref:`field-list`.  If you want to create additional custom derived fields,
-see :ref:`creating-derived-fields`.
-
-Every dataset has an attribute, ``ds.fields``.  This attribute possesses
-attributes itself, each of which is a "field type," and each field type has as
-its attributes the fields themselves.  When one of these is printed, it returns
-information about the field and things like units and so on.  You can use this
-for tab-completing as well as easier access to information.
-
-As an example, you might browse the available fields like so:
-
-.. code-block:: python
-
-  print(dir(ds.fields))
-  print(dir(ds.fields.gas))
-  print(ds.fields.gas.density)
-
-On an Enzo dataset, the result from the final command would look something like
-this:::
-
-  Alias Field for "('enzo', 'Density')" (gas, density): (units: g/cm**3)
-
-You can use this to easily explore available fields, particularly through
-tab-completion in Jupyter/IPython.
-
-For a more programmatic method of accessing fields, you can utilize the
-``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
-information about fields.  The full list of fields available for a dataset can
-be found as the attribute ``field_list`` for native, on-disk fields and
-``derived_field_list`` for derived fields (``derived_field_list`` is a superset
-of ``field_list``).  You can view these lists by examining a dataset like this:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print(ds.field_list)
-   print(ds.derived_field_list)
-
-By using the ``field_info()`` class, one can access information about a given
-field, like its default units or the source code for it.
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   ds.index
-   print(ds.field_info["gas", "pressure"].get_units())
-   print(ds.field_info["gas", "pressure"].get_source())
-
 .. _bfields:
 
 Magnetic Fields

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -155,7 +155,7 @@
    "outputs": [],
    "source": [
     "from yt.units.yt_array import YTQuantity\n",
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "from numpy.random import random\n",
     "import numpy as np\n",
     "\n",
@@ -446,7 +446,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G, kboltz\n",
+    "from yt.units import G, kboltz\n",
     "\n",
     "print (\"Newton's constant: \", G)\n",
     "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -467,7 +467,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import kboltz\n",
+    "from yt.units import kboltz\n",
     "kb = kboltz.to_astropy()"
    ]
   },

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -41,7 +41,7 @@
     "print (dd[\"temperature\"].to_equivalent(\"eV\", \"thermal\"))\n",
     "\n",
     "# Rest energy of the proton\n",
-    "from yt.utilities.physical_constants import mp\n",
+    "from yt.units import mp\n",
     "E_p = mp.to_equivalent(\"GeV\", \"mass_energy\")\n",
     "print (E_p)"
    ]
@@ -61,7 +61,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import clight\n",
+    "from yt.units import clight\n",
     "v = 0.1*clight\n",
     "g = v.to_equivalent(\"dimensionless\", \"lorentz\")\n",
     "print (g)\n",
@@ -166,7 +166,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+    "from yt.units import qp # the elementary charge in esu\n",
     "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
     "print (qp)\n",
     "print (qp_SI)"

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -324,7 +324,7 @@
    },
    "outputs": [],
    "source": [
-    "from yt.utilities.physical_constants import G\n",
+    "from yt.units import G\n",
     "print (G.in_base(\"mks\"))"
    ]
   },

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -3,8 +3,8 @@
 from yt.analysis_modules.cosmological_observation.api import \
     LightRay
 
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-lr = LightRay(fn)
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+lr = LightRay(ds)
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
@@ -17,7 +17,6 @@
 
 # Optionally, we can now overplot this ray on a projection of the source
 # dataset
-ds = yt.load(fn)
 p = yt.ProjectionPlot(ds, 'z', 'density')
 p.annotate_ray(lr)
 p.save()

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -15,6 +15,26 @@
 import subprocess
 
 
+def run_with_capture(*args, **kwargs):
+    sp = subprocess.Popen(*args,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          **kwargs)
+    out, err = sp.communicate()
+    if out:
+        sys.stdout.write(out.decode("UTF-8"))
+    if err:
+        sys.stderr.write(err.decode("UTF-8"))
+
+    if sp.returncode != 0:
+        retstderr = " ".join(args[0])
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(sp.returncode, retstderr)
+
+    return sp.returncode
+
+
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
 BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
@@ -37,10 +57,16 @@
 
 def check_recipe(cmd):
     '''Run single recipe'''
-    try:
-        subprocess.check_call(cmd)
-        result = True
-    except subprocess.CalledProcessError as e:
-        print(("Stdout output:\n", e.output))
-        result = False
-    assert result
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+    if out:
+        sys.stdout.write(out.decode("utf8"))
+    if err:
+        sys.stderr.write(err.decode("utf8"))
+
+    if proc.returncode != 0:
+        retstderr = " ".join(cmd)
+        retstderr += "\n\nTHIS IS THE REAL CAUSE OF THE FAILURE:\n" 
+        retstderr += err.decode("UTF-8") + "\n"
+        raise subprocess.CalledProcessError(proc.returncode, retstderr)

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -85,6 +85,7 @@
 cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
+# Recommended resolution for YouTube 360-degree videos is [3840, 2160]
 cam.resolution = [500, 250]
 # Standing at (x=0.4, y=0.5, z=0.5), we look in all the radial directions
 # from this point in spherical coordinate.
@@ -99,9 +100,11 @@
 
 # Stereo-spherical lens
 cam = sc.add_camera(ds, lens_type='stereo-spherical')
-# Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
-# will generate the final image with both left-eye and right-eye ones jointed together.
-cam.resolution = [1000, 250]
+# Set the size ratio of the final projection to be 1:1, since spherical-perspective lens
+# will generate the final image with both left-eye and right-eye ones jointed together,
+# with left-eye image on top and right-eye image on bottom.
+# Recommended resolution for YouTube virtual reality videos is [3840, 2160]
+cam.resolution = [500, 500]
 cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
 cam.switch_orientation(normal_vector=normal_vector,
                        north_vector=north_vector)

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -71,9 +71,9 @@
 a dimensionless float or array.
 
 If your field definition includes physical constants rather than defining a
-constant as a float, you can import it from ``yt.utilities.physical_constants``
+constant as a float, you can import it from ``yt.units``
 to get a predefined version of the constant with the correct units. If you know
-the units your data is supposed to have ahead of time, you can import unit
+the units your data is supposed to have ahead of time, you can also import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
 return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -34,7 +34,8 @@
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
 To get started, make a new directory in ``yt/frontends`` with the name
-of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+of your code and add the name into ``yt/frontends/api.py``.
+Copying the contents of the ``yt/frontends/_skeleton``
 directory will add a lot of boilerplate for the required classes and
 methods that are needed.  In particular, you'll have to create a
 subclass of ``Dataset`` in the data_structures.py file. This subclass

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -95,17 +95,17 @@
 To create new unit tests:
 
 #. Create a new ``tests/`` directory next to the file containing the
-   functionality you want to test.  Be sure to add this new directory as a
-   subpackage in the setup.py script located in the directory you're adding a
-   new ``tests/`` folder to.  This ensures that the tests will be deployed in
-   yt source and binary distributions.
+   functionality you want to test and add an empty ``__init__.py`` file to
+   it.
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a tuple of the form
-   ``function``, ``argument_one``, ``argument_two``, etc.  For example
-   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
-   asserts that 1.0 is equal to 1.0.
+   accept no arguments. The test function should do some work that tests some
+   functionality and should also verify that the results are correct using
+   assert statements or functions.  
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+   ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
+   captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
@@ -245,6 +245,12 @@
 * ``IsothermalCollapse/snap_505.hdf5``
 * ``GadgetDiskGalaxy/snapshot_200.hdf5``
 
+GAMER
+~~~~~~
+
+* ``InteractingJets/jet_000002``
+* ``WaveDarkMatter/psiDM_000020``
+
 Halo Catalog
 ~~~~~~~~~~~~
 
@@ -464,9 +470,9 @@
        test.prefix = "my_unique_name"
 
        # this ensures a nice test name in nose's output
-       test_my_ds.__description__ = test.description
+       test_my_ds.__name__ = test.description
 
-       yield test_my_ds
+       yield test
 
 Another good example of an image comparison test is the
 ``PlotWindowAttributeTest`` defined in the answer testing framework and used in
@@ -532,7 +538,13 @@
 
       local_pw_000:
 
-would regenerate answers for OWLS frontend.
+would regenerate answers for OWLS frontend. 
+
+When adding tests to an existing set of answers (like ``local_owls_000`` or ``local_varia_000``), 
+it is considered best practice to first submit a pull request adding the tests WITHOUT incrementing 
+the version number. Then, allow the tests to run (resulting in "no old answer" errors for the missing
+answers). If no other failures are present, you can then increment the version number to regenerate
+the answers. This way, we can avoid accidentally covering up test breakages.
 
 Adding New Answer Tests
 ~~~~~~~~~~~~~~~~~~~~~~~

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -775,32 +775,38 @@
 ----------
 
 FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``yt.load`` command and provide it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
+FLASH dataset, you can use the ``yt.load`` command and provide it the file name of 
+a plot file, checkpoint file, or particle file. Particle files require special handling
+depending on the situation, the main issue being that they typically lack grid information. 
+The first case is when you have a plotfile and a particle file that you would like to 
+load together. In the simplest case, this occurs automatically. For instance, if you
+were in a directory with the following files:
 
 .. code-block:: none
 
-   cosmoSim_coolhdf5_chk_0026
+   radio_halo_1kpc_hdf5_plt_cnt_0100 # plotfile
+   radio_halo_1kpc_hdf5_part_0100 # particle file
 
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
+where the plotfile and the particle file were created at the same time (therefore having 
+particle data consistent with the grid structure of the former). Notice also that the 
+prefix ``"radio_halo_1kpc_"`` and the file number ``100`` are the same. In this special case,
+the particle file will be loaded automatically when ``yt.load`` is called on the plotfile.
+This also works when loading a number of files in a time series.
 
-.. code-block:: python
-
-   import yt
-   ds = yt.load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
+If the two files do not have the same prefix and number, but they nevertheless have the same
+grid structure and are at the same simulation time, the particle data may be loaded with the
+``particle_filename`` optional argument to ``yt.load``:
 
 .. code-block:: python
 
     import yt
     ds = yt.load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
 
+However, if you don't have a corresponding plotfile for a particle file, but would still 
+like to load the particle data, you can still call ``yt.load`` on the file. However, the 
+grid information will not be available, and the particle data will be loaded in a fashion
+similar to SPH data. 
+
 .. rubric:: Caveats
 
 * Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
@@ -1021,6 +1027,34 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+GAMER Data
+----------
+
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("InteractingJets/jet_000002")
+
+Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
+you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+   code_units = { "length_unit":(1.0,"kpc"),
+                  "time_unit"  :(3.08567758096e+13,"s"),
+                  "mass_unit"  :(1.4690033e+36,"g") }
+   ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
+
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
+e.g., ``("gamer","Dens")``, will be in code units.
+
+.. rubric:: Caveats
+
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+
 .. _loading-amr-data:
 
 Generic AMR Data
@@ -1285,6 +1319,29 @@
 ``bbox``
        The bounding box for the particle positions.
 
+.. _loading-gizmo-data:
+
+Gizmo Data
+----------
+
+Gizmo datasets, including FIRE outputs, can be loaded into yt in the usual 
+manner.  Like other SPH data formats, yt loads Gizmo data as particle fields 
+and then uses smoothing kernels to deposit those fields to an underlying 
+grid structure as spatial fields as described in :ref:`loading-gadget-data`.  
+To load Gizmo datasets using the standard HDF5 output format::
+
+   import yt
+   ds = yt.load("snapshot_600.hdf5")
+
+Because the Gizmo output format is similar to the Gadget format, yt
+may load Gizmo datasets as Gadget depending on the circumstances, but this
+should not pose a problem in most situations.  FIRE outputs will be loaded 
+accordingly due to the number of metallicity fields found (11 or 17).  
+
+For Gizmo outputs written as raw binary outputs, you may have to specify
+a bounding box, field specification, and units as are done for standard 
+Gadget outputs.  See :ref:`loading-gadget-data` for more information.
+
 .. _loading-pyne-data:
 
 Halo Catalog Data

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -232,3 +232,28 @@
 whatever interface they wish for displaying and saving their image data.
 You can use the :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
 to accomplish this as described in :ref:`fixed-resolution-buffers`.
+
+High-level Information about Particles
+--------------------------------------
+
+There are a number of high-level helpers attached to ``Dataset`` objects to find
+out information about the particles in an output file. First, one can check if
+there are any particles in a dataset at all by examining
+``ds.particles_exist``. This will be ``True`` for datasets that include particles
+and ``False`` otherwise.
+
+One can also see which particle types are available in a dataset. Particle types
+that are available in the dataset's on-disk output are known as "raw" particle
+types, and they will appear in ``ds.particle_types_raw``. Particle types that
+are dynamically defined via a particle filter or a particle union will also
+appear in the ``ds.particle_types`` list. If the simulation only has one
+particle type on-disk, its name will be ``'io'``. If there is more than one
+particle type, the names of the particle types will be inferred from the output
+file. For example, Gadget HDF5 files have particle type names like ``PartType0``
+and ``PartType1``, while Enzo data, which usually only has one particle type,
+will only have a particle type named ``io``.
+
+Finally, one can see the number of each particle type by inspecting
+``ds.particle_type_counts``. This will be a dictionary mapping the names of
+particle types in ``ds.particle_types_raw`` to the number of each particle type
+in a simulation output.
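Editor's note: a short usage sketch of the helpers this documentation section describes (the dataset path is a placeholder; outputs depend on the data):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder path
    print(ds.particles_exist)       # True if any particles are present
    print(ds.particle_types_raw)    # on-disk types, e.g. ('io',)
    print(ds.particle_type_counts)  # e.g. {'io': 32768}; dataset-specific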

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -18,26 +18,27 @@
 
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash all-in-one installation script.  This builds
-  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific
-  python environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
+  will probably want to use the bash all-in-one installation script.  This
+  creates a python environment using the `miniconda python
+  distribution <http://conda.pydata.org/miniconda.html>`_ and the
+  `conda <http://conda.pydata.org/docs/>`_ package manager inside of a single
+  folder in your home directory. See :ref:`install-script` for more details.
 
 * If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
+  distribution and already have ``conda`` installed, see
+  :ref:`anaconda-installation` for details on how to install yt using the
+  ``conda`` package manager. Note that this is currently the only supported
+  installation mechanism on Windows.
 
-* If you already have a scientific python software stack installed on your
-  computer and are comfortable installing python packages,
+* If you want to build a development version of yt or are comfortable with
+  compilers and know your way around python packaging,
   :ref:`source-installation` will probably be the best choice. If you have set
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
-  let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via Linux package managers so long as you
-  have the necessary compilers installed (e.g. the ``build-essentials``
-  package on Debian and Ubuntu).
+  let you install yt using the python installed by the package
+  manager. Similarly, this will also work for python environments set up via
+  Linux package managers so long as you have the necessary compilers installed
+  (e.g. the ``build-essential`` package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -53,19 +54,21 @@
 Before you install yt, you must decide which branch (i.e. version) of the code
 you prefer to use:
 
-* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+* ``yt`` -- The most up-to-date *development* version with the most current
+  features but sometimes unstable (the development version of the next ``yt-3.x``
+  release).
+* ``stable`` -- The latest stable release of ``yt-3.x``.
+* ``yt-2.x`` -- The last stable release of ``yt-2.x``.
 
-If this is your first time using the code, we recommend using ``stable``,
-unless you specifically need some piece of brand-new functionality only
-available in ``yt`` or need to run an old script developed for ``yt-2.x``.
-There were major API and functionality changes made in yt after version 2.7
-in moving to version 3.0.  For a detailed description of the changes
-between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and
-``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked
-into one branch when you install yt, because you can easily change the active
-branch by following the instructions in :ref:`switching-between-yt-versions`.
+If this is your first time using the code, we recommend using ``stable``, unless
+you specifically need some piece of brand-new functionality only available in
+``yt`` or need to run an old script developed for ``yt-2.x``.  There were major
+API and functionality changes made in yt for version 3.0.  For a detailed
+description of the changes between versions 2.x (e.g. branch ``yt-2.x``) and 3.x
+(e.g. branches ``yt`` and ``stable``) see :ref:`yt3differences`.  Lastly, don't
+feel like you're locked into one branch when you install yt, because you can
+easily change the active branch by following the instructions in
+:ref:`switching-between-yt-versions`.
 
 .. _install-script:
 
@@ -74,9 +77,8 @@
 
 Because installation of all of the interlocking parts necessary to install yt
 itself can be time-consuming, yt provides an all-in-one installation script
-which downloads and builds a fully-isolated Python + NumPy + Matplotlib + HDF5 +
-Mercurial installation. Since the install script compiles yt's dependencies from
-source, you must have C, C++, and optionally Fortran compilers installed.
+which downloads and builds a fully-isolated installation of Python that includes
+NumPy, Matplotlib, H5py, Mercurial, and yt.
 
 The install script supports UNIX-like systems, including Linux, OS X, and most
 supercomputer and cluster environments. It is particularly suited for deployment
@@ -94,30 +96,62 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To get the installation script for the ``stable`` branch of the code,
-download it from:
+download it using the following command:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-If you wish to install a different version of yt (see
-:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate
-branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct
-install script.
-
-By default, the bash install script will install an array of items, but there
-are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
-etc.). The script has all of these options at the top of the file. You should be
-able to open it and edit it without any knowledge of bash syntax.  To execute
-it, run:
+If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  bash install_script.sh
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+
+If you wish to install a different version of yt (see :ref:`branches-of-yt`),
+replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
+the path above to get the correct install script.
+
+By default, the bash install script will create a python environment based on
+the `miniconda python distribution <http://conda.pydata.org/miniconda.html>`_,
+and will install yt's dependencies using the `conda
+<http://conda.pydata.org/docs/>`_ package manager. To avoid needing a
+compilation environment to run the install script, yt itself will also be
+installed using ``conda``.
+
+If you would like to customize your yt installation, you can edit the values of
+several variables that are defined at the top of the script.
+
+If you would like to build yt from source, you will need to edit the install
+script and set ``INST_YT_SOURCE=1`` near the top. This will clone a copy of the
+yt mercurial repository and build yt from source. The default is
+``INST_YT_SOURCE=0``, which installs yt from a binary conda package.
+
+The install script can also build python and all yt dependencies from source. To
+switch to this mode, set ``INST_CONDA=0`` at the top of the install script. If
+you choose this mode, you must also set ``INST_YT_SOURCE=1``.
+
+In addition, you can tell the install script to download and install some
+additional packages --- currently these include
+`PyX <http://pyx.sourceforge.net/>`_, the `Rockstar halo
+finder <http://arxiv.org/abs/1110.4372>`_, `SciPy <https://www.scipy.org/>`_,
+`Astropy <http://www.astropy.org/>`_, and the necessary dependencies for
+:ref:`unstructured mesh rendering <unstructured_mesh_rendering>`. The script has
+all of the options for installing optional packages near the top of the
+file. You should be able to open it and edit it without any knowledge of bash
+syntax. For example, to install scipy, change ``INST_SCIPY=0`` to
+``INST_SCIPY=1``.
+
+To execute the install script, run:
+
+.. code-block:: bash
+
+  $ bash install_script.sh
 
 Because the installer is downloading and building a variety of packages from
-source, this will likely take a while (e.g. 20 minutes), but you will get
-updates of its status at the command line throughout.
+source, this will likely take a few minutes, especially if you have a slow
+internet connection or have ``INST_CONDA=0`` set. You will get updates of its
+status at the command prompt throughout.
 
 If you receive errors during this process, the installer will provide you
 with a large amount of information to assist in debugging your problems.  The
@@ -127,26 +161,63 @@
 potentially figure out what went wrong.  If you have problems, though, do not
 hesitate to :ref:`contact us <asking-for-help>` for assistance.
 
+If the install script errors out with a message about being unable to import the
+python SSL bindings, this means that the Python built by the install script was
+unable to link against the OpenSSL library. This likely means that you installed
+with ``INST_CONDA=0`` on a recent version of OSX, or on a cluster that has a
+very out-of-date installation of OpenSSL. In both of these cases you will need
+to either install OpenSSL yourself via the system package manager or use
+``INST_CONDA=1``, since conda-based installs provide their own conda package
+for OpenSSL.
+
 .. _activating-yt:
 
 Activating Your Installation
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Once the installation has completed, there will be instructions on how to set up
-your shell environment to use yt by executing the activate script.  You must
-run this script in order to have yt properly recognized by your system.  You can
-either add it to your login script, or you must execute it in each shell session
-prior to working with yt.
+your shell environment to use yt.  
+
+Activating Conda-based installs (``INST_CONDA=1``)
+""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For conda-based installs, you will need to ensure that the installation's
+``yt-conda/bin`` directory is prepended to your ``PATH`` environment variable.
+
+For Bash-style shells, you can use the following command in a terminal session
+to temporarily activate the yt installation:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate
+  $ export PATH=/path/to/yt-conda/bin:$PATH
+
+and on csh-style shells:
+
+.. code-block:: csh
+
+  $ setenv PATH /path/to/yt-conda/bin:$PATH
+
+If you would like to permanently activate yt, you can also update the init file
+appropriate for your shell and OS (e.g. ``.bashrc``, ``.bash_profile``,
+``.cshrc``, ``.zshrc``) to include the same command.
+
+Activating source-based installs (``INST_CONDA=0``)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+
+For this installation method, you must run an ``activate`` script for yt to be
+properly recognized by your system.  You can either add it to your login script
+or execute it in each shell session prior to working with yt.
+
+.. code-block:: bash
+
+  $ source <yt installation directory>/bin/activate
 
 If you use csh or tcsh as your shell, activate that version of the script:
 
 .. code-block:: bash
 
-  source <yt installation directory>/bin/activate.csh
+  $ source <yt installation directory>/bin/activate.csh
 
 If you don't like executing outside scripts on your computer, you can set
 the shell variables manually.  ``YT_DEST`` needs to point to the root of the
@@ -166,14 +237,21 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
-Additionally, if you want to make sure you have the latest dependencies
-associated with yt and update the codebase simultaneously, type this:
+Additionally, if you ran the install script with ``INST_CONDA=0`` and want to
+make sure you have the latest dependencies associated with yt and update the
+codebase simultaneously, type this:
 
 .. code-block:: bash
 
-  yt update --all
+  $ yt update --all
+
+If you ran the install script with ``INST_CONDA=1`` and want to update your dependencies, run:
+
+.. code-block:: bash
+
+  $ conda update --all
 
 .. _removing-yt:
 
@@ -192,35 +270,26 @@
 Installing yt Using Anaconda
 ++++++++++++++++++++++++++++
 
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download ``Miniconda-latest-...``
-script for your platform and system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-latest-Linux-x86_64.sh
-
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  conda install yt
+  $ conda install yt
 
 which will install the stable branch of yt along with all of its dependencies.
 
+.. _nightly-conda-builds:
+
+Nightly Conda Builds
+^^^^^^^^^^^^^^^^^^^^
+
 If you would like to install the latest development version of yt, you can
 download it from our custom anaconda channel:
 
 .. code-block:: bash
 
-  conda install -c http://use.yt/with_conda/ yt
+  $ conda install -c http://use.yt/with_conda/ yt
 
 New packages for the development branch are built after every pull request is
 merged. In order to make sure you are running the latest version, it's recommended
@@ -228,28 +297,26 @@
 
 .. code-block:: bash
 
-  conda update -c http://use.yt/with_conda/ yt
+  $ conda update -c http://use.yt/with_conda/ yt
 
 The location of our channel can be added to ``.condarc`` to avoid retyping it during
 each *conda* invocation. Please refer to `Conda Manual
 <http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
 detailed instructions.
 
+.. _conda-source-build:
 
-Obtaining Source Code
-^^^^^^^^^^^^^^^^^^^^^
+Building yt from Source For Conda-based Installs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-There are two ways to get the yt source code when using an Anaconda
-installation.
-
-Option 1:
-
-Ensure that you have all build dependencies installed in your current
+First, ensure that you have all build dependencies installed in your current
 conda environment:
 
 .. code-block:: bash
 
-  conda install cython mercurial sympy ipython h5py matplotlib
+  $ conda install cython mercurial sympy ipython matplotlib
+
+In addition, you will need a C compiler installed.
 
 .. note::
   
@@ -260,87 +327,124 @@
 
   .. code-block:: bash
 
-     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
-     conda create -y -n py27 python=2.7 mercurial
-     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+   $ export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+   $ conda create -y -n py27 python=2.7 mercurial
+   $ ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
 
 Clone the yt repository with:
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Once inside the yt directory, update to the appropriate branch and
-run ``setup.py``. For example, the following commands will allow you
+run ``setup.py develop``. For example, the following commands will allow you
 to see the tip of the development branch.
 
 .. code-block:: bash
 
-  hg up yt
-  python setup.py develop
+  $ hg pull
+  $ hg update yt
+  $ python setup.py develop
 
 This will make sure you are running a version of yt corresponding to the
 most up-to-date source code.
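+
+To confirm which version is now active, a quick check from python
+(``yt.__version__`` is a standard attribute)::
+
+   >>> import yt
+   >>> yt.__version__  # e.g. '3.3-dev' on the development branch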
 
-Option 2:
+.. _rockstar-conda:
 
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
+Rockstar Halo Finder for Conda-based installations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to set rockstar up in a conda-based python environment is to run
+the install script with both ``INST_CONDA=1`` and ``INST_ROCKSTAR=1``.
+
+If you want to do this manually, you will need to follow these
+instructions. First, clone Matt Turk's fork of rockstar and compile it:
 
 .. code-block:: bash
 
-  git clone https://github.com/conda/conda-recipes
+  $ hg clone https://bitbucket.org/MatthewTurk/rockstar
+  $ cd rockstar
+  $ make lib
 
-Then navigate to the repository root and invoke ``conda build``:
+Next, copy ``librockstar.so`` into the ``lib`` folder of your anaconda installation:
 
 .. code-block:: bash
 
-  cd conda-recipes
-  conda build ./yt/
+  $ cp librockstar.so /path/to/anaconda/lib
 
-Note that building a yt conda package requires a C compiler.
+Finally, you will need to recompile yt to enable the rockstar interface. Clone a
+copy of the yt mercurial repository (see :ref:`conda-source-build`), or navigate
+to a clone that you have already made, and do the following:
+
+.. code-block:: bash
+
+  $ cd /path/to/yt-hg
+  $ ./clean.sh
+  $ echo /path/to/rockstar > rockstar.cfg
+  $ python setup.py develop
+
+Here ``/path/to/yt-hg`` is the path to your clone of the yt mercurial repository
+and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
+rockstar.
+
+Finally, to actually use rockstar, you will need to ensure the folder containing
+``librockstar.so`` is in your ``LD_LIBRARY_PATH``:
+
+.. code-block:: bash
+
+  $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
+
+You should now be able to enter a python session and import the rockstar
+interface:
+
+.. code-block:: python
+
+  >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
+
+If this python import fails, then you have not installed rockstar and yt's
+rockstar interface correctly.
 
 .. _windows-installation:
 
 Installing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-Installation on 64-bit Microsoft Windows platforms is supported using Anaconda (see
-:ref:`anaconda-installation`). Also see :ref:`windows-developing` for details on how to build yt
-from source in Windows.
+Installation on 64-bit Microsoft Windows platforms is supported using Anaconda
+(see :ref:`anaconda-installation`). Also see :ref:`windows-developing` for
+details on how to build yt from source in Windows.
 
 .. _source-installation:
 
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
+Installing yt Using ``pip`` or From Source
+++++++++++++++++++++++++++++++++++++++++++
+
+.. note::
+
+  If you wish to install yt from source in a conda-based installation of yt,
+  see :ref:`conda-source-build`.
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.
+installed on your system. Right now, the dependencies to build yt from
+source include:
 
-If you use a Linux OS, use your distro's package manager to install these yt
-dependencies on your system:
+- ``mercurial``
+- A C compiler such as ``gcc`` or ``clang``
+- ``Python 2.7``, ``Python 3.4``, or ``Python 3.5``
 
-- ``HDF5``
-- ``zeromq``
-- ``sqlite``
-- ``mercurial``
-
-Then install the required Python packages with ``pip``:
+In addition, building yt from source requires several python packages
+which can be installed with ``pip``:
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython h5py nose sympy
+  $ pip install numpy matplotlib cython sympy
 
-If you're using IPython notebooks, you can install its dependencies
-with ``pip`` as well:
+You may also want to install some of yt's optional dependencies, including
+``jupyter``, ``h5py`` (which in turn depends on the HDF5 library), ``scipy``, or
+``astropy``.
 
-.. code-block:: bash
-
-  $ pip install ipython[notebook]
-
-From here, you can use ``pip`` (which comes with ``Python``) to install the latest
-stable version of yt:
+From here, you can use ``pip`` (which comes with ``Python``) to install the
+latest stable version of yt:
 
 .. code-block:: bash
 
@@ -353,46 +457,30 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user --prefix=
 
 .. note::
 
-  If you maintain your own user-level python installation separate from the OS-level python
-  installation, you can leave off ``--user --prefix=``, although you might need
-  ``sudo`` depending on where python is installed. See `This StackOverflow
-  discussion
+  If you maintain your own user-level python installation separate from the
+  OS-level python installation, you can leave off ``--user --prefix=``, although
+  you might need ``sudo`` depending on where python is installed. See `This
+  StackOverflow discussion
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
   if you are curious why ``--prefix=`` is necessary on some systems.
 
-.. note::
-
-   yt requires version 18.0 or higher of ``setuptools``. If you see
-   error messages about this package, you may need to update it. For
-   example, with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade setuptools
-
-   or your preferred method. If you have ``distribute`` installed, you
-   may also see error messages for it if it's out of date. You can
-   update with pip via
-
-   .. code-block:: bash
-
-      pip install --upgrade distribute
-
-   or via your preferred method.
-   
-
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
 ``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX).  Please refer
 to the ``setuptools`` documentation for additional options.
 
+If you are unable to locate the ``yt`` executable (i.e. ``yt version`` fails),
+then you likely need to add ``$HOME/.local/bin`` (or the equivalent on your
+OS) to your ``PATH``. Some Linux distributions do not include this directory
+in the default search path.
+
 If you choose this installation method, you do not need to run any activation
 script since this will install yt into your global python environment.
 
@@ -401,15 +489,35 @@
 
 .. code-block:: bash
 
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop --user --prefix=
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py develop --user --prefix=
 
 As above, you can leave off ``--user --prefix=`` if you want to install yt into the default
 package install path.  If you do not have write access for this location, you
 might need to use ``sudo``.
 
+Build errors with ``setuptools`` or ``distribute``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building yt requires version 18.0 or higher of ``setuptools``. If you see error
+messages about this package, you may need to update it. For example, with pip
+via
+
+.. code-block:: bash
+
+  $ pip install --upgrade setuptools
+
+or your preferred method. If you have ``distribute`` installed, you may also see
+error messages for it if it's out of date. You can update with pip via
+
+.. code-block:: bash
+
+  $ pip install --upgrade distribute
+
+or via your preferred method.   
+
 Keeping yt Updated via Mercurial
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -424,7 +532,7 @@
 
 .. code-block:: bash
 
-  yt update
+  $ yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
 any changes from Bitbucket, and then recompile yt if necessary.
@@ -439,7 +547,7 @@
 
 .. code-block:: bash
 
-  yt --help
+  $ yt --help
 
 If this works, you should get a list of the various command-line options for
 yt, which means you have successfully installed yt.  Congratulations!
@@ -453,21 +561,57 @@
 
 .. _switching-between-yt-versions:
 
-Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
----------------------------------------------------------
+Switching versions of yt: ``yt-2.x``, ``stable``, and ``yt`` branches
+---------------------------------------------------------------------
 
-With the release of version 3.0 of yt, development of the legacy yt 2.x series
-has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the foreseeable future.  This makes it easy to use scripts written
-for older versions of yt without substantially updating them to support the
-new field naming or unit systems in yt version 3.
+Here we explain how to switch between different development branches of yt. 
 
-Currently, the yt-2.x codebase is contained in a named branch in the yt
-mercurial repository.  Thus, depending on the method you used to install
-yt, there are different instructions for switching versions.
+If You Installed yt Using the Bash Install Script
++++++++++++++++++++++++++++++++++++++++++++++++++
 
-If You Installed yt Using the Installer Script
-++++++++++++++++++++++++++++++++++++++++++++++
+The instructions for how to switch between branches depend on whether you ran
+the install script with ``INST_YT_SOURCE=0`` (the default) or
+``INST_YT_SOURCE=1``. You can determine which option you used by inspecting
+the output of:
+
+.. code-block:: bash
+
+  $ yt version 
+
+If the output from this command looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.2.3
+  ---
+
+i.e. it does not refer to a specific changeset hash, then you originally chose
+``INST_YT_SOURCE=0``.
+
+On the other hand, if the output from ``yt version`` looks like:
+
+.. code-block:: none
+
+  The current version and changeset for the code is:
+
+  ---
+  Version = 3.3-dev
+  Changeset = d8eec89b2c86 (yt) tip
+  ---
+
+i.e. it refers to a specific changeset in the yt mercurial repository, then
+you installed using ``INST_YT_SOURCE=1``.
+
+Conda-based installs (``INST_YT_SOURCE=0``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this case you can either install one of the nightly conda builds (see
+:ref:`nightly-conda-builds`), or you can follow the instructions above to build
+yt from source under conda (see :ref:`conda-source-build`).
+
+Source-based installs (``INST_YT_SOURCE=1``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
@@ -476,9 +620,9 @@
 
 .. code-block:: bash
 
-  cd yt-<machine>/src/yt-hg
-  hg update <desired-version>
-  python setup.py develop
+  $ cd yt-<machine>/src/yt-hg
+  $ hg update <desired-version>
+  $ python setup.py develop
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 
@@ -494,8 +638,8 @@
 
 .. code-block:: bash
 
-  pip uninstall yt
-  hg clone https://bitbucket.org/yt_analysis/yt
+  $ pip uninstall yt
+  $ hg clone https://bitbucket.org/yt_analysis/yt
 
 Now, to switch between versions, you need to navigate to the root of
 the mercurial yt repository. Use mercurial to
@@ -503,9 +647,9 @@
 
 .. code-block:: bash
 
-  cd yt
-  hg update <desired-version>
-  python setup.py install --user --prefix=
+  $ cd yt
+  $ hg update <desired-version>
+  $ python setup.py install --user --prefix=
 
 Valid versions to jump to are described in :ref:`branches-of-yt`.
 

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/quickstart/2)_Data_Inspection.ipynb
--- a/doc/source/quickstart/2)_Data_Inspection.ipynb
+++ b/doc/source/quickstart/2)_Data_Inspection.ipynb
@@ -154,6 +154,35 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Finally, we can get basic information about the particle types and number of particles in a simulation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (ds.particle_types)\n",
+    "print (ds.particle_types_raw)\n",
+    "print (ds.particle_type_counts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For this dataset, we see that there are two particle types defined, (`io` and `all`), but that only one of these particle types in in `ds.particle_types_raw`. The `ds.particle_types` list contains *all* particle types in the simulation, including ones that are dynamically defined like particle unions. The `ds.particle_types_raw` list includes only particle types that are in the output file we loaded the dataset from.\n",
+    "\n",
+    "We can also see that there are a bit more than 1.1 million particles in this simulation. Only particle types in `ds.particle_types_raw` will appear in the `ds.particle_type_counts` dictionary."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "# Mesh Structure\n",
     "\n",
     "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -28,14 +28,18 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| FITS                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | FLASH                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| FITS                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
-+-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| Gizmo                 |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Maestro               |   Y [#f1]_ |     N     |      Y     |   Y   |    Y     |    Y     |     N      | Partial  |

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -692,6 +692,28 @@
    s.annotate_triangle_facets(points, plot_args={"colors": 'black'})
    s.save()
 
+.. _annotate-mesh-lines:
+
+Annotate Mesh Lines Callback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: annotate_mesh_lines(plot_args=None)
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.MeshLinesCallback`.)
+
+   This draws the mesh line boundaries over a plot using a Matplotlib
+   line collection. This callback is only useful for unstructured or 
+   semi-structured mesh datasets. 
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e')
+   sl = yt.SlicePlot(ds, 2, ('connect1', 'nodal_aux'))
+   sl.annotate_mesh_lines(plot_args={'color':'black'})
+   sl.save()
+
 .. _annotate-ray:
 
 Overplot the Path of a Ray

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -406,16 +406,14 @@
    import yt
    ds = yt.load('MOOSE_sample_data/out.e-s010')
    sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
-   sl.annotate_mesh_lines(thresh=0.1)
+   sl.annotate_mesh_lines(plot_args={'color':'black'})
    sl.zoom(0.75)
    sl.save()
 
-This annotation is performed by marking the pixels where the mapped coordinate is close
-to the element boundary. What counts as 'close' (in the mapped coordinate system) is
-determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
-thinner.
+The ``plot_args`` parameter is a dictionary of keyword arguments that will be passed
+to matplotlib. It can be used to control the mesh line color, thickness, etc.
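+
+For example, a sketch that makes the lines thicker (``color`` is used as in the
+example above; ``linewidths`` is the standard Matplotlib ``LineCollection``
+keyword, assuming the callback forwards it unchanged):
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
+   # Draw thicker black mesh lines over the slice.
+   sl.annotate_mesh_lines(plot_args={'color': 'black', 'linewidths': 1.0})
+   sl.save()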
 
-The above example all involve 8-node hexahedral mesh elements. Here is another example from
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
 a dataset that uses 6-node wedge elements:
 
 .. python-script::

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -55,7 +55,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
@@ -113,24 +113,23 @@
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("redshift0058")
-   dd = ds.sphere("max", (200, "kpc"))
-   rho = 5e-27
+    import yt
+    from yt.units import kpc
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    dd = ds.sphere(ds.domain_center, (500, "kpc"))
+    rho = 1e-28
 
-   bounds = [(dd.center[i] - 100.0/ds['kpc'],
-              dd.center[i] + 100.0/ds['kpc']) for i in range(3)]
+    bounds = [[dd.center[i] - 250*kpc, dd.center[i] + 250*kpc] for i in range(3)]
 
-   surf = ds.surface(dd, "density", rho)
+    surf = ds.surface(dd, "density", rho)
 
-   upload_id = surf.export_sketchfab(
-       title = "RD0058 - 5e-27",
-       description = "Extraction of Density (colored by Temperature) at 5e-27 " \
-                   + "g/cc from a galaxy formation simulation by Ryan Joung."
-       color_field = "temperature",
-       color_map = "hot",
-       color_log = True,
-       bounds = bounds
+    upload_id = surf.export_sketchfab(
+        title="galaxy0030 - 1e-28",
+        description="Extraction of Density (colored by temperature) at 1e-28 g/cc",
+        color_field="temperature",
+        color_map="hot",
+        color_log=True,
+        bounds=bounds
    )
 
 and yt will extract a surface, convert to a format that Sketchfab.com
@@ -141,15 +140,13 @@
 
 .. raw:: html
 
-   <iframe frameborder="0" height="480" width="854" allowFullScreen
-   webkitallowfullscreen="true" mozallowfullscreen="true"
-   src="http://skfb.ly/l4jh2edcba?autostart=0&transparent=0&autospin=0&controls=1&watermark=1"></iframe>
+     <iframe width="640" height="480" src="https://sketchfab.com/models/ff59dacd55824110ad5bcc292371a514/embed" frameborder="0" allowfullscreen mozallowfullscreen="true" webkitallowfullscreen="true" onmousewheel=""></iframe>
 
 As a note, Sketchfab has a maximum model size of 50MB for the free account.
-50MB is pretty hefty, though, so it shouldn't be a problem for most needs.
-We're working on a way to optionally upload links to the Sketchfab models on
-the `yt Hub <https://hub.yt-project.org/>`_, but for now, if you want to share
-a cool model we'd love to see it!
+50MB is pretty hefty, though, so it shouldn't be a problem for most
+needs. Additionally, if you have an eligible e-mail address associated with a
+school or university, you can request a free professional account, which allows
+models up to 200MB. See https://sketchfab.com/education for details.
 
 OBJ and MTL Files
 -----------------
@@ -167,7 +164,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'
@@ -239,7 +236,7 @@
 
    import yt
 
-   ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    rho = [2e-27, 1e-27]
    trans = [1.0, 0.5]
    filename = './surfaces'

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,8 +31,8 @@
 as grid or continent lines, and then to render a production-quality
 visualization.  By changing the "lens" used, a single camera path can output
 images suitable for planetarium domes, immersive and head tracking systems
-(such as the Oculus Rift or recent "spherical" movie viewers such as the
-mobile YouTube app), as well as standard screens.
+(such as the Oculus Rift or recent 360-degree/virtual reality movie viewers
+such as the mobile YouTube app), as well as standard screens.
 
 .. image:: _images/scene_diagram.svg
    :width: 50%
@@ -327,13 +327,19 @@
 
 The :class:`~yt.visualization.volume_rendering.lens.SphericalLens` produces
 a cylindrical-spherical projection.  Movies rendered in this way can be
-displayed in head-tracking devices (e.g. Oculus Rift) or in YouTube 360 view
-(for more information see `the YouTube help
-<https://support.google.com/youtube/answer/6178631?hl=en>`, but it's a
-simple matter of running a script on an encoded movie file.)
+displayed as YouTube 360-degree videos (for more information see
+`the YouTube help: Upload 360-degree videos
+<https://support.google.com/youtube/answer/6178631?hl=en>`_).
 :class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`
 is identical to :class:`~yt.visualization.volume_rendering.lens.SphericalLens`
-but it produces two images from nearby camera positions for use in 3D viewing.
+but it produces two images from nearby camera positions for virtual reality
+movies, which can be displayed in head-tracking devices (e.g. Oculus Rift)
+or in mobile YouTube app with Google Cardboard (for more information
+see `the YouTube help: Upload virtual reality videos
+<https://support.google.com/youtube/answer/6316263?hl=en>`_).
+`This virtual reality video
+<https://youtu.be/ZYWY53X7UQE>`_ on YouTube is an example produced with
+:class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`.
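+
+As a minimal sketch of selecting one of these lenses (this assumes the
+``lens_type`` keyword of ``yt.create_scene`` and a locally available copy of
+the ``IsolatedGalaxy`` sample dataset)::
+
+   import yt
+
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   # Build a scene whose camera uses the spherical lens.
+   sc = yt.create_scene(ds, lens_type="spherical")
+   sc.save("spherical_rendering.png")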
 
 .. _annotated-vr-example:
 

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,10 +10,10 @@
 
 [flake8]
 # we exclude:
-#      api.py and __init__.py files to avoid spurious unused import errors
-#      _mpl_imports.py for the same reason
+#      api.py, mods.py, _mpl_imports.py, and __init__.py files to avoid spurious 
+#      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r c00b5e9b34ea400f93e11b2313dd80ee81081eee -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 setup.py
--- a/setup.py
+++ b/setup.py
@@ -92,7 +92,8 @@
               libraries=std_libs,
               depends=["yt/utilities/lib/fp_utils.pxd",
                        "yt/geometry/grid_container.pxd",
-                       "yt/geometry/grid_visitors.pxd"]),
+                       "yt/geometry/grid_visitors.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
     Extension("yt.geometry.oct_container",
               ["yt/geometry/oct_container.pyx",
                "yt/utilities/lib/tsearch.c"],
@@ -163,8 +164,10 @@
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/element_mappings.pxd",
+                       "yt/utilities/lib/mesh_triangulation.h",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/primitives.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -174,7 +177,8 @@
                        "yt/utilities/lib/amr_kdtools.pxd",
                        "yt/utilities/lib/grid_traversal.pxd",
                        "yt/utilities/lib/contour_finding.pxd",
-                       "yt/geometry/oct_container.pxd"]),
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
     Extension("yt.utilities.lib.geometry_utils",
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
@@ -189,14 +193,22 @@
                        "yt/utilities/lib/fixed_interpolator.pxd",
                        "yt/utilities/lib/fixed_interpolator.h",
                        ]),
+    Extension("yt.utilities.lib.mesh_triangulation",
+              ["yt/utilities/lib/mesh_triangulation.pyx"],
+              depends=["yt/utilities/lib/mesh_triangulation.h"]),
     Extension("yt.utilities.lib.pixelization_routines",
               ["yt/utilities/lib/pixelization_routines.pyx",
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
-              language="c++",
               libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
                                         "yt/utilities/lib/pixelization_constants.h",
                                         "yt/utilities/lib/element_mappings.pxd"]),
+    Extension("yt.utilities.lib.primitives",
+              ["yt/utilities/lib/primitives.pyx"],
+              libraries=std_libs, 
+              depends=["yt/utilities/lib/primitives.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd",
+                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],
@@ -231,10 +243,13 @@
     "amr_kdtools"
 ]
 for ext_name in lib_exts:
+    lib_deps = ["yt/utilities/lib/fp_utils.pxd"]
+    if ext_name == 'misc_utilities':
+        lib_deps.append('yt/geometry/selection_routines.pxd')
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
+                  libraries=std_libs, depends=lib_deps))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -275,20 +290,31 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd"]),
+                  depends=["yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/mesh_triangulation.h",
+                           "yt/utilities/lib/mesh_intersection.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/mesh_traversal.pxd"]),
         Extension("yt.utilities.lib.mesh_traversal",
                   ["yt/utilities/lib/mesh_traversal.pyx"],
                   depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd"]),
+                           "yt/utilities/lib/grid_traversal.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
         Extension("yt.utilities.lib.mesh_samplers",
                   ["yt/utilities/lib/mesh_samplers.pyx"],
                   depends=["yt/utilities/lib/mesh_samplers.pxd",
                            "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/primitives.pxd"]),
         Extension("yt.utilities.lib.mesh_intersection",
                   ["yt/utilities/lib/mesh_intersection.pyx"],
                   depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd"]),
+                           "yt/utilities/lib/mesh_construction.pxd",
+                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                           "yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/primitives.pxd",
+                           "yt/utilities/lib/vec3_ops.pxd"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())
@@ -435,7 +461,7 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES + SHADERS_FILES,
+    data_files=MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)],
     ext_modules=cython_extensions + extensions
 )
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a35d6be43bd6/
Changeset:   a35d6be43bd6
Branch:      yt
User:        brittonsmith
Date:        2016-06-13 18:47:20+00:00
Summary:     Merging.
Affected #:  1 file

diff -r 089ba9aa3fae53daf5473b54f46bb23b92c6cf96 -r a35d6be43bd62dfcad55f165502b5c93b88f12ea setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,116 +83,70 @@
     Extension("yt.geometry.grid_visitors",
               ["yt/geometry/grid_visitors.pyx"],
               include_dirs=["yt/utilities/lib"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/grid_visitors.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.grid_container",
               ["yt/geometry/grid_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/grid_container.pxd",
-                       "yt/geometry/grid_visitors.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.oct_container",
               ["yt/geometry/oct_container.pyx",
                "yt/utilities/lib/tsearch.c"],
               include_dirs=["yt/utilities/lib"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.oct_visitors",
               ["yt/geometry/oct_visitors.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_oct_container",
               ["yt/geometry/particle_oct_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.selection_routines",
               ["yt/geometry/selection_routines.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/grid_traversal.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/oct_visitors.pxd",
-                       "yt/geometry/grid_container.pxd",
-                       "yt/geometry/grid_visitors.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_deposit",
               ["yt/geometry/particle_deposit.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd",
-                       "yt/geometry/particle_deposit.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_smooth",
               ["yt/geometry/particle_smooth.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd",
-                       "yt/geometry/particle_deposit.pxd",
-                       "yt/geometry/particle_smooth.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.fake_octree",
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.spatial.ckdtree",
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
-              libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/element_mappings.pxd",
-                       "yt/utilities/lib/mesh_triangulation.h",
-                       "yt/utilities/lib/vec3_ops.pxd",
-                       "yt/utilities/lib/primitives.pxd"]),
+              depends=["yt/utilities/lib/mesh_triangulation.h"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/amr_kdtools.pxd",
-                       "yt/utilities/lib/grid_traversal.pxd",
-                       "yt/utilities/lib/contour_finding.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.geometry_utils",
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.marching_cubes",
               ["yt/utilities/lib/marching_cubes.pyx",
                "yt/utilities/lib/fixed_interpolator.c"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/fixed_interpolator.h",
-                       ]),
+              depends=["yt/utilities/lib/fixed_interpolator.h"]),
     Extension("yt.utilities.lib.mesh_triangulation",
               ["yt/utilities/lib/mesh_triangulation.pyx"],
               depends=["yt/utilities/lib/mesh_triangulation.h"]),
@@ -200,15 +154,11 @@
               ["yt/utilities/lib/pixelization_routines.pyx",
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
-                                        "yt/utilities/lib/pixelization_constants.h",
-                                        "yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs,
+              depends=["yt/utilities/lib/pixelization_constants.h"]),
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
-              libraries=std_libs, 
-              depends=["yt/utilities/lib/primitives.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd",
-                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],
@@ -222,15 +172,11 @@
               libraries=std_libs,
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/kdtree.h",
-                       "yt/utilities/lib/fixed_interpolator.h",
-                       "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/field_interpolation_tables.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/kdtree.h",
+                       "yt/utilities/lib/fixed_interpolator.h"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
-              libraries=std_libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.alt_ray_tracers",
               ["yt/utilities/lib/alt_ray_tracers.pyx"],
               libraries=std_libs),
@@ -243,13 +189,14 @@
     "amr_kdtools"
 ]
 for ext_name in lib_exts:
-    lib_deps = ["yt/utilities/lib/fp_utils.pxd"]
+    lib_deps = []
     if ext_name == 'misc_utilities':
         lib_deps.append('yt/geometry/selection_routines.pxd')
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=std_libs, depends=lib_deps))
+                  libraries=std_libs,
+                  depends=lib_deps))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -270,11 +217,7 @@
               include_dirs=["yt/frontends/artio/artio_headers/",
                             "yt/geometry/",
                             "yt/utilities/lib/"],
-              depends=glob.glob("yt/frontends/artio/artio_headers/*.c") +
-              ["yt/utilities/lib/fp_utils.pxd",
-               "yt/geometry/oct_container.pxd",
-               "yt/geometry/selection_routines.pxd",
-               "yt/geometry/particle_deposit.pxd"]),
+              depends=glob.glob("yt/frontends/artio/artio_headers/*.c")),
     Extension("yt.utilities.spatial._distance_wrap",
               glob.glob("yt/utilities/spatial/src/*.c")),
     Extension("yt.visualization._MPL",
@@ -290,31 +233,13 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/mesh_triangulation.h",
-                           "yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/mesh_traversal.pxd"]),
+                  depends=["yt/utilities/lib/mesh_triangulation.h"]),
         Extension("yt.utilities.lib.mesh_traversal",
-                  ["yt/utilities/lib/mesh_traversal.pyx"],
-                  depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+                  ["yt/utilities/lib/mesh_traversal.pyx"]),
         Extension("yt.utilities.lib.mesh_samplers",
-                  ["yt/utilities/lib/mesh_samplers.pyx"],
-                  depends=["yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                           "yt/utilities/lib/primitives.pxd"]),
+                  ["yt/utilities/lib/mesh_samplers.pyx"]),
         Extension("yt.utilities.lib.mesh_intersection",
-                  ["yt/utilities/lib/mesh_intersection.pyx"],
-                  depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                           "yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/primitives.pxd",
-                           "yt/utilities/lib/vec3_ops.pxd"]),
+                  ["yt/utilities/lib/mesh_intersection.pyx"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())
@@ -390,9 +315,12 @@
         _build_py.run(self)
 
 class build_ext(_build_ext):
-    # subclass setuptools extension builder to avoid importing numpy
+    # subclass setuptools extension builder to avoid importing cython and numpy
     # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
     def finalize_options(self):
+        from Cython.Build import cythonize
+        self.distribution.ext_modules[:] = cythonize(
+                self.distribution.ext_modules)
         _build_ext.finalize_options(self)
         # Prevent numpy from thinking it is still in its setup process
         # see http://stackoverflow.com/a/21621493/1382869
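
The finalize_options override above defers the Cython import to build time, so merely importing setup.py (e.g. during dependency resolution) no longer requires Cython or numpy to be installed. A minimal self-contained sketch of the same pattern, with placeholder module names rather than yt's real extension list:

    from setuptools import setup, Extension
    from setuptools.command.build_ext import build_ext as _build_ext

    class build_ext(_build_ext):
        # import Cython only when extensions are actually built
        def finalize_options(self):
            from Cython.Build import cythonize
            self.distribution.ext_modules[:] = cythonize(
                self.distribution.ext_modules)
            _build_ext.finalize_options(self)

    setup(name="example",
          ext_modules=[Extension("example.fast", ["example/fast.pyx"])],
          cmdclass={"build_ext": build_ext})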


https://bitbucket.org/yt_analysis/yt/commits/21fc3d027005/
Changeset:   21fc3d027005
Branch:      yt
User:        brittonsmith
Date:        2016-06-13 19:01:53+00:00
Summary:     Reverting cache of cell mask.
Affected #:  1 file

diff -r a35d6be43bd62dfcad55f165502b5c93b88f12ea -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -812,21 +812,18 @@
             if not np.any(m): continue
             yield obj, m
 
-    _cell_mask = None
     @property
     def _cond_ind(self):
-        if self._cell_mask is None:
-            ind = None
-            obj = self.base_object
-            with obj._field_parameter_state(self.field_parameters):
-                for cond in self.conditionals:
-                    res = eval(cond)
-                    if ind is None: ind = res
-                    if ind.shape != res.shape:
-                        raise YTIllDefinedCutRegion(self.conditionals)
-                    np.logical_and(res, ind, ind)
-            self._cell_mask = ind
-        return self._cell_mask
+        ind = None
+        obj = self.base_object
+        with obj._field_parameter_state(self.field_parameters):
+            for cond in self.conditionals:
+                res = eval(cond)
+                if ind is None: ind = res
+                if ind.shape != res.shape:
+                    raise YTIllDefinedCutRegion(self.conditionals)
+                np.logical_and(res, ind, ind)
+        return ind
 
     _particle_mask = None
     @property
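
Reverting the cache means _cond_ind is recomputed on every access: each conditional string is eval'd against the base object and the boolean results are AND-ed together in place. A toy illustration of that combination logic, with a plain dict standing in for the yt data object and made-up field values:

    import numpy as np

    obj = {"density": np.array([0.5, 2.0, 3.0]),
           "temperature": np.array([10., 200., 30.])}
    conditionals = ['obj["density"] > 1.0', 'obj["temperature"] < 100.']

    ind = None
    for cond in conditionals:
        res = eval(cond)
        if ind is None:
            ind = res
        if ind.shape != res.shape:
            raise ValueError("conditionals must share a shape")
        np.logical_and(res, ind, ind)  # accumulate the AND in place

    print(ind)  # [False False  True]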


https://bitbucket.org/yt_analysis/yt/commits/c4ec736d6d30/
Changeset:   c4ec736d6d30
Branch:      yt
User:        brittonsmith
Date:        2016-06-15 20:01:04+00:00
Summary:     Merging.
Affected #:  39 files

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -704,7 +704,7 @@
 if type -P curl &>/dev/null
 then
     echo "Using curl"
-    export GETFILE="curl -sSO"
+    export GETFILE="curl -sSOL"
 else
     echo "Using wget"
     export GETFILE="wget -nv"

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 setup.py
--- a/setup.py
+++ b/setup.py
@@ -189,14 +189,10 @@
     "amr_kdtools"
 ]
 for ext_name in lib_exts:
-    lib_deps = []
-    if ext_name == 'misc_utilities':
-        lib_deps.append('yt/geometry/selection_routines.pxd')
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=std_libs,
-                  depends=lib_deps))
+                  libraries=std_libs))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -223,9 +219,6 @@
     Extension("yt.visualization._MPL",
               ["yt/visualization/_MPL.c"],
               libraries=std_libs),
-    Extension("yt.utilities.data_point_utilities",
-              ["yt/utilities/data_point_utilities.c"],
-              libraries=std_libs),
 ]
 
 # EMBREE

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -26,7 +26,7 @@
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
-  local_gizmo_000:
+  local_gizmo_001:
     - yt/frontends/gizmo/tests/test_outputs.py
 
   local_halos_000:

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -70,6 +70,9 @@
         if parent is not None:
             self.data.parent = self.parent.data
 
+        if parent is not None:
+            self.data.parent = self.parent.data
+
         # List containing characteristics about clumps that are to be written 
         # out by the write routines.
         if clump_info is None:

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/analysis_modules/level_sets/tests/test_clump_finding.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/tests/test_clump_finding.py
@@ -0,0 +1,74 @@
+"""
+Clump finder tests
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.level_sets.api import \
+    Clump, \
+    find_clumps, \
+    get_lowest_clumps
+from yt.frontends.stream.api import \
+    load_uniform_grid
+from yt.testing import \
+    assert_array_equal, \
+    assert_equal
+
+def test_clump_finding():
+    n_c = 8
+    n_p = 1
+    dims = (n_c, n_c, n_c)
+
+    density = np.ones(dims)
+    high_rho = 10.
+    # add a couple disconnected density enhancements
+    density[2, 2, 2] = high_rho
+    density[6, 6, 6] = high_rho
+
+    # put a particle at the center of one of them
+    dx = 1. / n_c
+    px = 2.5 * dx * np.ones(n_p)
+    
+    data = {"density": density,
+            "particle_mass": np.ones(n_p),
+            "particle_position_x": px,
+            "particle_position_y": px,
+            "particle_position_z": px,
+            "number_of_particles": n_p}
+
+    ds = load_uniform_grid(data, dims)
+
+    ad = ds.all_data()
+    master_clump = Clump(ad, ("gas", "density"))
+    master_clump.add_validator("min_cells", 1)
+
+    find_clumps(master_clump, 0.5, 2. * high_rho, 10.)
+
+    # there should be two children
+    assert_equal(len(master_clump.children), 2)
+
+    leaf_clumps = get_lowest_clumps(master_clump)
+    # two leaf clumps
+    assert_equal(len(leaf_clumps), 2)
+
+
+    # check some clump fields
+    assert_equal(master_clump.children[0]["density"][0].size, 1)
+    assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
+    assert_equal(master_clump.children[0]["particle_mass"].size, 1)
+    assert_array_equal(master_clump.children[0]["particle_mass"], ad["particle_mass"])
+    assert_equal(master_clump.children[1]["density"][0].size, 1)
+    assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
+    assert_equal(master_clump.children[1]["particle_mass"].size, 0)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1194,8 +1194,9 @@
         col6 = pyfits.Column(name='FLUX', format='D', array=np.array([flux.value]))
         col7 = pyfits.Column(name='SPECTRUM', format='80A', array=np.array([phfile+"[PHLIST,1]"]))
         col8 = pyfits.Column(name='IMAGE', format='80A', array=np.array([phfile+"[PHLIST,1]"]))
+        col9 = pyfits.Column(name='SRC_NAME', format='80A', array=np.array(["yt_src"]))
 
-        coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
+        coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
 
         wrhdu = pyfits.BinTableHDU.from_columns(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
@@ -1350,13 +1351,17 @@
             f = pyfits.open(self.parameters["RMF"])
             nchan = int(f["EBOUNDS"].header["DETCHANS"])
             num = 0
-            for i in range(1,len(f["EBOUNDS"].columns)+1):
-                if f["EBOUNDS"].header["TTYPE%d" % i] == "CHANNEL":
+            if "MATRIX" in f:
+                mat_key = "MATRIX"
+            elif "SPECRESP MATRIX" in f:
+                mat_key = "SPECRESP MATRIX"
+            for i in range(1,len(f[mat_key].columns)+1):
+                if f[mat_key].header["TTYPE%d" % i] == "F_CHAN":
                     num = i
                     break
             if num > 0:
                 tlmin = "TLMIN%d" % num
-                cmin = int(f["EBOUNDS"].header[tlmin])
+                cmin = int(f[mat_key].header[tlmin])
             else:
                 mylog.warning("Cannot determine minimum allowed value for channel. " +
                               "Setting to 0, which may be wrong.")

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -264,7 +264,7 @@
         emid = self.emid.d
         if self.thermal_broad:
             sigma = E0*np.sqrt(2.*kT*erg_per_keV/(self.A[element]*amu_grams))/cl
-            vec = broaden_lines(E0, sigma, amp, emid)*de
+            vec = broaden_lines(E0, sigma, amp, ebins)
         else:
             vec = np.histogram(E0, ebins, weights=amp)[0]
         tmpspec += vec

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/analysis_modules/photon_simulator/utils.pyx
--- a/yt/analysis_modules/photon_simulator/utils.pyx
+++ b/yt/analysis_modules/photon_simulator/utils.pyx
@@ -1,31 +1,30 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.math cimport exp
-
-cdef double gfac = 1.0/np.sqrt(np.pi)
-
+from libc.math cimport erf
+    
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def broaden_lines(np.ndarray[np.float64_t, ndim=1] E0,
                   np.ndarray[np.float64_t, ndim=1] sigma,
                   np.ndarray[np.float64_t, ndim=1] amp,
-                  np.ndarray[np.float64_t, ndim=1] E):
+                  np.ndarray[np.float64_t, ndim=1] ebins):
 
-    cdef int i, j, n
-    cdef double x, isigma, iamp
-    cdef np.ndarray[np.float64_t, ndim=1] lines
+    cdef int i, j, n, m
+    cdef double x, isigma
+    cdef np.ndarray[np.float64_t, ndim=1] cdf, vec
 
     n = E0.shape[0]
-    m = E.shape[0]
-    lines = np.zeros(m)
-
+    m = ebins.shape[0]
+    cdf = np.zeros(m)
+    vec = np.zeros(m-1)
+    
     for i in range(n):
         isigma = 1.0/sigma[i]
-        iamp = gfac*amp[i]*isigma
         for j in range(m):
-            x = (E[j]-E0[i])*isigma
-            lines[j] += iamp*exp(-x*x)
-
-    return lines
+            x = (ebins[j]-E0[i])*isigma
+            cdf[j] = 0.5*(1+erf(x))
+        for j in range(m-1):
+            vec[j] = vec[j] + (cdf[j+1] - cdf[j])*amp[i]
+    return vec
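
The rewritten kernel integrates each Gaussian line over the spectral bins via its cumulative distribution instead of sampling the profile at bin centers, so line flux is conserved even when a line is narrower than a bin. A pure-NumPy sketch of the same scheme (as in the diff, sigma is assumed to already carry the factor of sqrt(2) relative to the Gaussian standard deviation):

    import numpy as np
    from scipy.special import erf

    def broaden_lines(E0, sigma, amp, ebins):
        # E0, sigma, amp: line centers, widths, amplitudes
        # ebins: bin edges of length m; returns m-1 bin fluxes
        vec = np.zeros(ebins.size - 1)
        for e0, sig, a in zip(E0, sigma, amp):
            cdf = 0.5 * (1.0 + erf((ebins - e0) / sig))
            vec += a * np.diff(cdf)
        return vec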

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -830,12 +830,14 @@
     def _part_ind(self):
         if self._particle_mask is None:
             parent = getattr(self, "parent", self.base_object)
+            units = "code_length"
             mask = points_in_cells(
-                self["x"], self["y"], self["z"],
-                self["dx"], self["dy"], self["dz"],
-                parent["particle_position_x"].to("code_length"),
-                parent["particle_position_y"].to("code_length"),
-                parent["particle_position_z"].to("code_length"))
+                self["x"].to(units), self["y"].to(units),
+                self["z"].to(units), self["dx"].to(units),
+                self["dy"].to(units), self["dz"].to(units),
+                parent["particle_position_x"].to(units),
+                parent["particle_position_y"].to(units),
+                parent["particle_position_z"].to(units))
             self._particle_mask = mask
         return self._particle_mask
 

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -90,6 +90,10 @@
                        particle_type = particle_type,
                        units = unit_system["number_density"])
 
+    return [(ftype, "%s_number_density" % species),
+            (ftype, "%s_density" % species),
+            (ftype, "%s_mass" % species)]
+
 def add_species_field_by_fraction(registry, ftype, species, 
                                   particle_type = False):
     """
@@ -114,6 +118,10 @@
                        particle_type = particle_type,
                        units = unit_system["number_density"])
 
+    return [(ftype, "%s_number_density" % species),
+            (ftype, "%s_density" % species),
+            (ftype, "%s_mass" % species)]
+
 def add_species_aliases(registry, ftype, alias_species, species):
     """
     This takes a field registry, a fluid type, and two species names.  
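
Returning the newly created field names lets callers operate on exactly the fields that were just registered. Hypothetical usage, assuming a populated field registry:

    new_fields = add_species_field_by_fraction(registry, "gas", "H2")
    for ftype, fname in new_fields:
        print(ftype, fname)  # gas H2_number_density / H2_density / H2_mass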

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/frontends/eagle/tests/test_outputs.py
--- a/yt/frontends/eagle/tests/test_outputs.py
+++ b/yt/frontends/eagle/tests/test_outputs.py
@@ -24,3 +24,10 @@
 @requires_file(s28)
 def test_EagleDataset():
     assert isinstance(data_dir_load(s28), EagleDataset)
+
+s399 = "snipshot_399_z000p000/snip_399_z000p000.0.hdf5"
+@requires_file(s399)
+def test_Snipshot():
+    ds = data_dir_load(s399)
+    ds.index
+    assert isinstance(ds, EagleDataset)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -25,7 +25,6 @@
 
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
-g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -42,11 +41,6 @@
 )
 iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
 
-g64_fields = iso_fields.copy()
-g64_fields["deposit", "PartType4_density"] = None
-g64_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
-
-
 @requires_file(isothermal_h5)
 @requires_file(isothermal_bin)
 def test_GadgetDataset():
@@ -62,10 +56,3 @@
     for test in sph_answer(ds, 'snap_505', 2**17, iso_fields):
         test_iso_collapse.__name__ = test.description
         yield test
-
-@requires_ds(g64, big_data=True)
-def test_gizmo_64():
-    ds = data_dir_load(g64, kwargs=g64_kwargs)
-    for test in sph_answer(ds, 'snap_N64L16_135', 524288, g64_fields):
-        test_gizmo_64.__name__ = test.description
-        yield test

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -23,3 +23,5 @@
 
 from .io import \
       IOHandlerGDFHDF5
+
+from . import tests

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/frontends/gizmo/tests/test_outputs.py
--- a/yt/frontends/gizmo/tests/test_outputs.py
+++ b/yt/frontends/gizmo/tests/test_outputs.py
@@ -34,13 +34,16 @@
         (('gas', 'velocity_magnitude'), None),
         (("deposit", "all_count"), None),
         (("deposit", "all_cic"), None),
+        (("deposit", "PartType0_density"), None),
     ]
 )
 
-@requires_ds(FIRE_m12i)
-def test_GizmoDataset():
-    ds = data_dir_load(FIRE_m12i)
+g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
+
+@requires_ds(g64, big_data=True)
+def test_gizmo_64():
+    ds = data_dir_load(g64)
     assert isinstance(ds, GizmoDataset)
-    for test in sph_answer(ds, 'snapshot_600', 4786950, fields):
-        test_GizmoDataset.__name__ = test.description
+    for test in sph_answer(ds, 'snap_N64L16_135', 524288, fields):
+        test_gizmo_64.__name__ = test.description
         yield test

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/frontends/owls/fields.py
--- a/yt/frontends/owls/fields.py
+++ b/yt/frontends/owls/fields.py
@@ -76,8 +76,6 @@
 
         smoothed_suffixes = ("_number_density", "_density", "_mass")
 
-
-
         # we add particle element fields for stars and gas
         #-----------------------------------------------------
         if ptype in self._add_elements:
@@ -144,6 +142,9 @@
                     symbol = ion[0:1].capitalize()
                     roman = int(ion[1:])
 
+                if (ptype, symbol + "_fraction") not in self.field_aliases:
+                    continue
+
                 pstr = "_p" + str(roman-1)
                 yt_ion = symbol + pstr
 
@@ -166,6 +167,9 @@
                     symbol = ion[0:1].capitalize()
                     roman = int(ion[1:])
 
+                if (ptype, symbol + "_fraction") not in self.field_aliases:
+                    continue
+
                 pstr = "_p" + str(roman-1)
                 yt_ion = symbol + pstr
 
@@ -201,6 +205,9 @@
                 symbol = ion[0:1].capitalize()
                 roman = int(ion[1:])
 
+            if (ptype, symbol + "_fraction") not in self.field_aliases:
+                continue
+
             pstr = "_p" + str(roman-1)
             yt_ion = symbol + pstr
             ftype = ptype

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -963,3 +963,24 @@
     except ImportError:
         requests = None
     return requests
+
+@contextlib.contextmanager
+def dummy_context_manager(*args, **kwargs):
+    yield
+
+def matplotlib_style_context(style_name=None, after_reset=True):
+    """Returns a context manager for controlling matplotlib style.
+
+    Arguments are passed to matplotlib.style.context() if specified. Defaults
+    to setting "classic" style, after resetting to the default config parameters.
+
+    On older matplotlib versions (<=1.5.0) where matplotlib.style isn't
+    available, returns a dummy context manager.
+    """
+    if style_name is None:
+        style_name = 'classic'
+    try:
+        import matplotlib.style
+        return matplotlib.style.context(style_name, after_reset=after_reset)
+    except ImportError:
+        return dummy_context_manager()
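
The helper degrades gracefully on matplotlib <= 1.5.0: when matplotlib.style is importable it returns a real style context, otherwise a no-op one, so calling code is identical either way:

    with matplotlib_style_context():
        pass  # plotting code runs the same with or without matplotlib.style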

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -29,6 +29,7 @@
 
 
 class CartesianCoordinateHandler(CoordinateHandler):
+    name = "cartesian"
 
     def __init__(self, ds, ordering = ('x','y','z')):
         super(CartesianCoordinateHandler, self).__init__(ds, ordering)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -71,6 +71,7 @@
                     ds.quan(width[0], fix_unitary(width[1])))
 
 class CoordinateHandler(object):
+    name = None
     
     def __init__(self, ds, ordering):
         self.ds = weakref.proxy(ds)
@@ -132,10 +133,13 @@
         self._axis_id = ai
         return ai
 
+    _image_axis_name = None
     @property
     def image_axis_name(self):
         # Default
-        rv = {}
+        if self._image_axis_name is not None:
+            return self._image_axis_name
+        self._image_axis_name = rv = {}
         for i in range(3):
             rv[i] = (self.axis_name[self.x_axis[i]],
                      self.axis_name[self.y_axis[i]])

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -29,6 +29,7 @@
 #
 
 class CylindricalCoordinateHandler(CoordinateHandler):
+    name = "cylindrical"
 
     def __init__(self, ds, ordering = ('r', 'z', 'theta')):
         super(CylindricalCoordinateHandler, self).__init__(ds, ordering)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -23,6 +23,7 @@
     pixelize_cylinder, pixelize_aitoff
 
 class GeographicCoordinateHandler(CoordinateHandler):
+    name = "geographic"
 
     def __init__(self, ds, ordering = ('latitude', 'longitude', 'altitude')):
         super(GeographicCoordinateHandler, self).__init__(ds, ordering)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -18,6 +18,7 @@
 
 
 class PolarCoordinateHandler(CylindricalCoordinateHandler):
+    name = "polar"
 
     def __init__(self, ds, ordering = ('r', 'theta', 'z')):
         super(PolarCoordinateHandler, self).__init__(ds, ordering)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -20,6 +20,7 @@
     _get_coord_fields
 
 class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+    name = "spectral_cube"
 
     def __init__(self, ds, ordering = ('x', 'y', 'z')):
         ordering = tuple("xyz"[axis] for axis in

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -23,6 +23,7 @@
     pixelize_cylinder, pixelize_aitoff
 
 class SphericalCoordinateHandler(CoordinateHandler):
+    name = "spherical"
 
     def __init__(self, ds, ordering = ('r', 'theta', 'phi')):
         super(SphericalCoordinateHandler, self).__init__(ds, ordering)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -477,6 +477,9 @@
     test_unit = Unit('cm**-3', base_value=1.0, registry=ds.unit_registry)
     assert_equal(test_unit.latex_repr, '\\frac{1}{\\rm{cm}^{3}}')
 
+    test_unit = Unit('m_geom/l_geom**3')
+    assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -626,6 +626,29 @@
     new_length = fix_length(length, ds=ds)
     yield assert_equal, YTQuantity(10, 'cm'), new_length
 
+def test_code_unit_combinations():
+    """
+    Test comparing code units coming from different datasets
+    """
+    ds1 = fake_random_ds(64, nprocs=1, length_unit=1)
+    ds2 = fake_random_ds(64, nprocs=1, length_unit=10)
+
+    q1 = ds1.quan(1, 'code_length')
+    q2 = ds2.quan(1, 'code_length')
+
+    assert_equal(10*q1, q2)
+    assert_equal(q1/q2, 0.1)
+    assert_true(q1 < q2)
+    assert_true(q2 > q1)
+    assert_true(not bool(q1 > q2))
+    assert_true(not bool(q2 < q1))
+    assert_true(q1 != q2)
+    assert_true(not bool(q1 == q2))
+
+    assert_equal((q1 + q2).in_cgs().value, 11)
+    assert_equal((q2 + q1).in_cgs().value, 11)
+    assert_equal((q1 - q2).in_cgs().value, -9)
+    assert_equal((q2 - q1).in_cgs().value, 9)
 
 def test_ytarray_pickle():
     ds = fake_random_ds(64, nprocs=1)

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -110,8 +110,25 @@
             symbol_table[ex] = registry.lut[str(ex)][3]
         except:
             symbol_table[ex] = r"\rm{" + str(ex).replace('_', '\ ') + "}"
+
+    # invert the symbol table dict to look for keys with identical values
+    invert_symbols = {}
+    for key, value in symbol_table.items():
+        if value not in invert_symbols:
+            invert_symbols[value] = [key]
+        else:
+            invert_symbols[value].append(key)
+
+    # if there are any units with identical latex representations, substitute
+    # units to avoid uncanceled terms in the final latex expression.
+    for val in invert_symbols:
+        symbols = invert_symbols[val]
+        for i in range(1, len(symbols)):
+            expr = expr.subs(symbols[i], symbols[0])
+
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
+
     if latex_repr == '1':
         return ''
     else:
@@ -258,7 +275,11 @@
     def latex_repr(self):
         if self._latex_repr is not None:
             return self._latex_repr
-        self._latex_repr = get_latex_representation(self.expr, self.registry)
+        if self.expr.is_Atom:
+            expr = self.expr
+        else:
+            expr = self.expr.copy()
+        self._latex_repr = get_latex_representation(expr, self.registry)
         return self._latex_repr
 
     ### Some sympy conventions
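
The substitution step matters for geometrized unit systems, where distinct unit symbols such as m_geom and l_geom share the single LaTeX representation M_\odot; without merging them, the printed fraction would keep uncanceled terms. A small sympy illustration of the idea:

    import sympy

    m_geom, l_geom = sympy.symbols("m_geom l_geom")
    symbol_table = {m_geom: r"M_\odot", l_geom: r"M_\odot"}
    expr = m_geom / l_geom**3

    # invert the table and substitute duplicates with one representative
    invert_symbols = {}
    for sym, name in symbol_table.items():
        invert_symbols.setdefault(name, []).append(sym)
    for syms in invert_symbols.values():
        for extra in syms[1:]:
            expr = expr.subs(extra, syms[0])

    # prints \frac{1}{M_\odot^{2}}
    print(sympy.latex(expr, symbol_names=symbol_table))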

diff -r 21fc3d027005d1f032bf9dc81f0db6d6f87eda6d -r c4ec736d6d306253264b97891d90db87fc520255 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -167,7 +167,8 @@
     # Check that other is a YTArray.
     if hasattr(other, 'units'):
         if this.units.expr is other.units.expr:
-            return other
+            if this.units.base_value == other.units.base_value:
+                return other
         if not this.units.same_dimensions_as(other.units):
             raise YTUnitOperationError(op_string, this.units, other.units)
         return other.in_units(this.units)

This diff is so big that we needed to truncate the remainder.
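
One behavioral consequence of the yt_array change above: code units from different datasets are no longer treated as interchangeable just because their unit expressions match; the base values must match too, otherwise a conversion is performed. Using yt's own test helper, as in test_code_unit_combinations:

    from yt.testing import fake_random_ds

    ds1 = fake_random_ds(64, nprocs=1, length_unit=1)
    ds2 = fake_random_ds(64, nprocs=1, length_unit=10)
    q1 = ds1.quan(1, "code_length")
    q2 = ds2.quan(1, "code_length")

    assert 10 * q1 == q2
    assert (q1 + q2).in_cgs().value == 11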

https://bitbucket.org/yt_analysis/yt/commits/e53f63770e23/
Changeset:   e53f63770e23
Branch:      yt
User:        brittonsmith
Date:        2016-07-28 09:40:58+00:00
Summary:     Merging.
Affected #:  2 files

diff -r 8bcf15297a44ba45987308b185a700d49563d0e3 -r e53f63770e233dbf5feed3346d7014e5e165e6fb yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import copy
+import h5py
 import numpy as np
 import uuid
 
@@ -49,13 +50,25 @@
 class Clump(object):
     children = None
     def __init__(self, data, field, parent=None,
-                 clump_info=None, validators=None):
+                 clump_info=None, validators=None,
+                 base=None):
         self.data = data
         self.field = field
         self.parent = parent
         self.quantities = data.quantities
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
+        self.info = {}
+
+        if base is None:
+            base = self
+            self.total_clumps = 0
+        self.base = base
+        self.clump_id = self.base.total_clumps
+        self.base.total_clumps += 1
+
+        if parent is not None:
+            self.data.parent = self.parent.data
 
         if parent is not None:
             self.data.parent = self.parent.data
@@ -160,7 +173,8 @@
                 continue
             self.children.append(Clump(new_clump, self.field, parent=self,
                                        clump_info=self.clump_info,
-                                       validators=self.validators))
+                                       validators=self.validators,
+                                       base=self.base))
 
     def pass_down(self,operation):
         """
@@ -302,3 +316,46 @@
             write_clumps(child, 0, fh)
     if top:
         fh.close()
+
+def write_clump_index_h5(clump, level, fh):
+    print level
+    top = False
+    if not isinstance(fh, h5py.File) and \
+      not isinstance(fh, h5py.Group):
+        fh = h5py.File(fh, "w")
+        top = True
+    for item in clump.clump_info:
+        item(clump)
+        my_info = clump.info[item.name][1]
+        fh.attrs[item.name] = my_info
+        if hasattr(my_info, "units") and \
+          "dimensionless" not in str(my_info.units):
+            units = str(my_info.units)
+        else:
+            units = ""
+        fh.attrs["%s_units" % item.name] = units
+    if ((clump.children is not None) and (len(clump.children) > 0)):
+        i = 0
+        for child in clump.children:
+            my_group = fh.create_group("child_%04d" % i)
+            write_clump_index_h5(child, (level+1), my_group)
+            i += 1
+    if top:
+        fh.close()
+        
+def write_clumps_h5(clump, filename):
+    clump_list = get_lowest_clumps(clump)
+    fh = h5py.File(filename, "w")
+    for item in clump.clump_info:
+        quantity = []
+        for my_clump in clump_list:
+            item(my_clump)
+            quantity.append(my_clump.info[item.name][1])
+        quantity = clump.data.ds.arr(quantity)
+        dataset = fh.create_dataset(item.name, data=quantity)
+        if "dimensionless" in str(quantity.units):
+            units = ""
+        else:
+            units = str(quantity.units)
+        dataset.attrs["units"] = units
+    fh.close()
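
write_clump_index_h5 maps the clump tree directly onto HDF5 groups: each clump's info items become attributes and each child becomes a nested child_NNNN group. A toy stand-alone version of the recursion, with a hypothetical Node class standing in for Clump:

    import h5py

    class Node:
        def __init__(self, value, children=()):
            self.value = value
            self.children = list(children)

    def write_index(node, fh):
        fh.attrs["value"] = node.value  # one group per node
        for i, child in enumerate(node.children):
            write_index(child, fh.create_group("child_%04d" % i))

    root = Node(1.0, [Node(2.0), Node(3.0, [Node(4.0)])])
    with h5py.File("clump_index.h5", "w") as fh:
        write_index(root, fh)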

diff -r 8bcf15297a44ba45987308b185a700d49563d0e3 -r e53f63770e233dbf5feed3346d7014e5e165e6fb yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -21,14 +21,15 @@
 clump_info_registry = OperatorRegistry()
 
 def add_clump_info(name, function):
-    clump_info_registry[name] = ClumpInfoCallback(function)
+    clump_info_registry[name] = ClumpInfoCallback(name, function)
 
 class ClumpInfoCallback(object):
     r"""
     A ClumpInfoCallback is a function that takes a clump, computes a 
     quantity, and returns a string to be printed out for writing clump info.
     """
-    def __init__(self, function, args=None, kwargs=None):
+    def __init__(self, name, function, args=None, kwargs=None):
+        self.name = name
         self.function = function
         self.args = args
         if self.args is None: self.args = []
@@ -36,43 +37,46 @@
         if self.kwargs is None: self.kwargs = {}
 
     def __call__(self, clump):
-        return self.function(clump, *self.args, **self.kwargs)
+        if self.name not in clump.info:
+            clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
+        rv = clump.info[self.name]
+        return rv[0] % rv[1]
     
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
-    return "Cells: %d." % n_cells
+    return "Cells: %d.", n_cells
 add_clump_info("total_cells", _total_cells)
 
 def _cell_mass(clump):
     cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
-    return "Mass: %e Msun." % cell_mass
+    return "Mass: %e Msun.", cell_mass
 add_clump_info("cell_mass", _cell_mass)
 
 def _mass_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
-    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (mass-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
 
 def _volume_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("index", "cell_volume")).in_units("Msun")
-    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (volume-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
 
 def _max_grid_level(clump):
     max_level = clump.data["index", "grid_level"].max()
-    return "Max grid level: %d." % max_level
+    return "Max grid level: %d.", max_level
 add_clump_info("max_grid_level", _max_grid_level)
 
 def _min_number_density(clump):
     min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
-    return "Min number density: %.6e cm^-3." % min_n
+    return "Min number density: %.6e cm^-3.", min_n
 add_clump_info("min_number_density", _min_number_density)
 
 def _max_number_density(clump):
     max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
-    return "Max number density: %.6e cm^-3." % max_n
+    return "Max number density: %.6e cm^-3.", max_n
 add_clump_info("max_number_density", _max_number_density)
 
 def _distance_to_main_clump(clump, units="pc"):
@@ -82,6 +86,7 @@
     master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
     my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e %s." % \
-      (distance.in_units(units), units)
+    distance.convert_to_units("pc")
+    return "Distance from master center of mass: %%.6e %s." % units, \
+      distance.in_units(units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)
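
The doubled percent sign in the return value defers the numeric substitution: the units are interpolated immediately, while the %.6e specifier survives for ClumpInfoCallback.__call__ to fill in later. In miniature:

    units = "pc"
    template = "Distance from master center of mass: %%.6e %s." % units
    # template == "Distance from master center of mass: %.6e pc."
    print(template % 3.085678e18)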


https://bitbucket.org/yt_analysis/yt/commits/2f22347511b2/
Changeset:   2f22347511b2
Branch:      yt
User:        brittonsmith
Date:        2016-07-28 09:53:19+00:00
Summary:     Make gravitational boundedness consider the gravity of particles on gas and vice versa.
Affected #:  1 file

diff -r e53f63770e233dbf5feed3346d7014e5e165e6fb -r 2f22347511b20f5f1f7ed2054abdb919ae85965d yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -64,28 +64,30 @@
              (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
              (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
 
+    if use_particles:
+        m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
+                            clump["all", "particle_mass"].in_cgs()])
+        px = np.concatenaate([clump["index", "x"].in_cgs(),
+                              clump["all", "particle_position_x"].in_cgs()])
+        py = np.concatenaate([clump["index", "y"].in_cgs(),
+                              clump["all", "particle_position_y"].in_cgs()])
+        pz = np.concatenaate([clump["index", "z"].in_cgs(),
+                              clump["all", "particle_position_z"].in_cgs()])
+    else:
+        m = clump["gas", "cell_mass"].in_cgs()
+        px = clump["index", "x"].in_cgs()
+        py = clump["index", "y"].in_cgs()
+        pz = clump["index", "z"].in_cgs()
+
     potential = clump.data.ds.quan(G *
         gravitational_binding_energy(
-            clump["gas", "cell_mass"].in_cgs(),
-            clump["index", "x"].in_cgs(),
-            clump["index", "y"].in_cgs(),
-            clump["index", "z"].in_cgs(),
+            m, px, py, pz,
             truncate, (kinetic / G).in_cgs()),
-        kinetic.in_cgs().units)
-    
+            kinetic.in_cgs().units)
+
     if truncate and potential >= kinetic:
         return True
 
-    if use_particles:
-        potential += clump.data.ds.quan(G *
-            gravitational_binding_energy(
-                clump["all", "particle_mass"].in_cgs(),
-                clump["all", "particle_position_x"].in_cgs(),
-                clump["all", "particle_position_y"].in_cgs(),
-                clump["all", "particle_position_z"].in_cgs(),
-                truncate, ((kinetic - potential) / G).in_cgs()),
-        kinetic.in_cgs().units)
-
     return potential >= kinetic
 add_validator("gravitationally_bound", _gravitationally_bound)
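
Concatenating the gas and particle arrays before the potential sum is what adds the missing physics: the pairwise loop now includes gas-particle cross terms that the old two-pass version (gas first, then particles separately) never saw. A pure-Python sketch of the pairwise sum (yt's real kernel is the Cython gravitational_binding_energy):

    import numpy as np

    def pairwise_potential(m, x, y, z):
        total = 0.0
        for i in range(len(m)):
            for j in range(i + 1, len(m)):
                r = np.sqrt((x[i] - x[j])**2 +
                            (y[i] - y[j])**2 +
                            (z[i] - z[j])**2)
                total += m[i] * m[j] / r
        return total  # multiply by G to get an energy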
 


https://bitbucket.org/yt_analysis/yt/commits/334ffda6c6e4/
Changeset:   334ffda6c6e4
Branch:      yt
User:        brittonsmith
Date:        2016-07-28 13:06:00+00:00
Summary:     Import numpy, arg.
Affected #:  1 file

diff -r 2f22347511b20f5f1f7ed2054abdb919ae85965d -r 334ffda6c6e448185b1b6cb0e510cf773240cbcb yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,6 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.utilities.lib.misc_utilities import \
     gravitational_binding_energy
 from yt.utilities.operator_registry import \


https://bitbucket.org/yt_analysis/yt/commits/5bc7de20fd7f/
Changeset:   5bc7de20fd7f
Branch:      yt
User:        brittonsmith
Date:        2016-07-28 14:33:28+00:00
Summary:     Spelling.
Affected #:  1 file

diff -r 334ffda6c6e448185b1b6cb0e510cf773240cbcb -r 5bc7de20fd7f961a817761bb369b03f995c95229 yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -69,12 +69,12 @@
     if use_particles:
         m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
                             clump["all", "particle_mass"].in_cgs()])
-        px = np.concatenaate([clump["index", "x"].in_cgs(),
-                              clump["all", "particle_position_x"].in_cgs()])
-        py = np.concatenaate([clump["index", "y"].in_cgs(),
-                              clump["all", "particle_position_y"].in_cgs()])
-        pz = np.concatenaate([clump["index", "z"].in_cgs(),
-                              clump["all", "particle_position_z"].in_cgs()])
+        px = np.concatenate([clump["index", "x"].in_cgs(),
+                             clump["all", "particle_position_x"].in_cgs()])
+        py = np.concatenate([clump["index", "y"].in_cgs(),
+                             clump["all", "particle_position_y"].in_cgs()])
+        pz = np.concatenate([clump["index", "z"].in_cgs(),
+                             clump["all", "particle_position_z"].in_cgs()])
     else:
         m = clump["gas", "cell_mass"].in_cgs()
         px = clump["index", "x"].in_cgs()


https://bitbucket.org/yt_analysis/yt/commits/817152b9376e/
Changeset:   817152b9376e
Branch:      yt
User:        brittonsmith
Date:        2016-08-02 15:25:44+00:00
Summary:     Store contour key for each clump.
Affected #:  1 file

diff -r 5bc7de20fd7f961a817761bb369b03f995c95229 -r 817152b9376eecbd1d7347541629cd3bf2211468 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -51,7 +51,7 @@
     children = None
     def __init__(self, data, field, parent=None,
                  clump_info=None, validators=None,
-                 base=None):
+                 base=None, contour_key=None):
         self.data = data
         self.field = field
         self.parent = parent
@@ -66,6 +66,7 @@
         self.base = base
         self.clump_id = self.base.total_clumps
         self.base.total_clumps += 1
+        self.contour_key = contour_key
 
         if parent is not None:
             self.data.parent = self.parent.data
@@ -174,7 +175,8 @@
             self.children.append(Clump(new_clump, self.field, parent=self,
                                        clump_info=self.clump_info,
                                        validators=self.validators,
-                                       base=self.base))
+                                       base=self.base,
+                                       contour_key=contour_key))
 
     def pass_down(self,operation):
         """


https://bitbucket.org/yt_analysis/yt/commits/56300f5f7153/
Changeset:   56300f5f7153
Branch:      yt
User:        brittonsmith
Date:        2016-08-02 15:33:41+00:00
Summary:     Store contour id for each clump.
Affected #:  1 file

diff -r 817152b9376eecbd1d7347541629cd3bf2211468 -r 56300f5f715303b2837ecbec2f87c3e339029825 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -51,7 +51,8 @@
     children = None
     def __init__(self, data, field, parent=None,
                  clump_info=None, validators=None,
-                 base=None, contour_key=None):
+                 base=None, contour_key=None,
+                 contour_id=None):
         self.data = data
         self.field = field
         self.parent = parent
@@ -67,6 +68,7 @@
         self.clump_id = self.base.total_clumps
         self.base.total_clumps += 1
         self.contour_key = contour_key
+        self.contour_id = contour_id
 
         if parent is not None:
             self.data.parent = self.parent.data


https://bitbucket.org/yt_analysis/yt/commits/5cde0ad56252/
Changeset:   5cde0ad56252
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 12:43:34+00:00
Summary:     Only store clump info functions in the base clump.  Also, add a tree walk function and stub for save_as_dataset.
Affected #:  1 file

diff -r 56300f5f715303b2837ecbec2f87c3e339029825 -r 5cde0ad562526b15f65432249b8019ba825a8bef yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -20,8 +20,12 @@
 
 from yt.fields.derived_field import \
     ValidateSpatial
-from yt.funcs import mylog, iterable
-from yt.extern.six import string_types
+from yt.funcs import \
+    get_output_filename, \
+    iterable, \
+    mylog
+from yt.extern.six import \
+    string_types
 
 from .clump_info_items import \
     clump_info_registry
@@ -61,9 +65,15 @@
         self.max_val = self.data[field].max()
         self.info = {}
 
+        # is this the parent clump?
         if base is None:
             base = self
             self.total_clumps = 0
+            if clump_info is None:
+                self.set_default_clump_info()
+            else:
+                self.clump_info = clump_info
+
         self.base = base
         self.clump_id = self.base.total_clumps
         self.base.total_clumps += 1
@@ -76,15 +86,6 @@
         if parent is not None:
             self.data.parent = self.parent.data
 
-        # List containing characteristics about clumps that are to be written 
-        # out by the write routines.
-        if clump_info is None:
-            self.set_default_clump_info()
-        else:
-            # Clump info will act the same if add_info_item is called 
-            # before or after clump finding.
-            self.clump_info = copy.deepcopy(clump_info)
-
         if validators is None:
             validators = []
         self.validators = validators
@@ -144,7 +145,7 @@
     def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
-        for item in self.clump_info:
+        for item in self.base.clump_info:
             value = item(self)
             f_ptr.write("%s%s\n" % ('\t'*level, value))
 
@@ -175,11 +176,52 @@
                 # Using "ones" here will speed things up.
                 continue
             self.children.append(Clump(new_clump, self.field, parent=self,
-                                       clump_info=self.clump_info,
                                        validators=self.validators,
                                        base=self.base,
                                        contour_key=contour_key))
 
+    def twalk(self):
+        yield self
+        if self.children is None:
+            return
+        for child in self.children:
+            for a_node in child.twalk():
+                yield a_node
+
+    def save_as_dataset(self, filename=None, fields=None):
+        r"""Export clump tree to a reloadable yt dataset.
+
+        This function will take a clump object and output a dataset
+        containing the fields given in the ``fields`` list and all info
+        items.  The resulting dataset can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset and the clump
+            index.
+        fields : list of strings or tuples, optional
+            If this is supplied, it is the list of fields to be saved to
+            disk.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        """
+
+        keyword = "%s_clump_%d" % (str(self.ds), self.clump_id)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+
+
+        return filename
+
     def pass_down(self,operation):
         """
         Performs an operation on a clump with an exec and passes the 
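
twalk is a simple pre-order generator over the clump hierarchy; hypothetical usage with a master_clump like the one built in the clump finding test above:

    for node in master_clump.twalk():
        print(node.clump_id, node.contour_key, node.contour_id)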


https://bitbucket.org/yt_analysis/yt/commits/1a360734e7c6/
Changeset:   1a360734e7c6
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 13:39:22+00:00
Summary:     Implementing saving of clump info data.
Affected #:  1 file

diff -r 5cde0ad562526b15f65432249b8019ba825a8bef -r 1a360734e7c6d982e9d17d651fb579ffc390cb11 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -20,6 +20,8 @@
 
 from yt.fields.derived_field import \
     ValidateSpatial
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.funcs import \
     get_output_filename, \
     iterable, \
@@ -215,10 +217,44 @@
 
         """
 
-        keyword = "%s_clump_%d" % (str(self.ds), self.clump_id)
+        ds = self.data.ds
+        keyword = "%s_clump_%d" % (str(ds), self.clump_id)
         filename = get_output_filename(filename, keyword, ".h5")
 
+        clump_info = dict([(ci.name, []) for ci in self.base.clump_info])
+        clump_info.update(
+            dict([(field, []) for field in ["clump_id", "parent_id",
+                                            "contour_key", "contour_id"]]))
+        for clump in self.twalk():
+            clump_info["clump_id"].append(clump.clump_id)
+            if clump.parent is None:
+                parent_id = -1
+            else:
+                parent_id = clump.parent.clump_id
+            clump_info["parent_id"].append(parent_id)
 
+            contour_key = clump.contour_key
+            if contour_key is None: contour_key = -1
+            clump_info["contour_key"].append(contour_key)
+            contour_id = clump.contour_id
+            if contour_id is None: contour_id = -1
+            clump_info["contour_id"].append(contour_id)
+
+            for ci in self.base.clump_info:
+                ci(clump)
+                clump_info[ci.name].append(clump.info[ci.name][1])
+        for ci in clump_info:
+            if hasattr(clump_info[ci][0], "units"):
+                clump_info[ci] = ds.arr(clump_info[ci])
+            else:
+                clump_info[ci] = np.array(clump_info[ci])
+
+        field_types = dict([(ci, "clump") for ci in clump_info])
+        extra_attrs = {"data_type": "yt_clump_tree",
+                       "container_type": "yt_clump_tree"}
+        save_as_dataset(ds, filename, clump_info,
+                        field_types=field_types,
+                        extra_attrs=extra_attrs)
 
         return filename
 


https://bitbucket.org/yt_analysis/yt/commits/3b5e3571b2b8/
Changeset:   3b5e3571b2b8
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 15:20:33+00:00
Summary:     Implementing data field saving for clumps.  Still need to properly filter particle fields.
Affected #:  1 file

diff -r 1a360734e7c6d982e9d17d651fb579ffc390cb11 -r 3b5e3571b2b8902f14d8825eb05a1181aa2ff401 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -180,7 +180,8 @@
             self.children.append(Clump(new_clump, self.field, parent=self,
                                        validators=self.validators,
                                        base=self.base,
-                                       contour_key=contour_key))
+                                       contour_key=contour_key,
+                                       contour_id=cid))
 
     def twalk(self):
         yield self
@@ -221,6 +222,7 @@
         keyword = "%s_clump_%d" % (str(ds), self.clump_id)
         filename = get_output_filename(filename, keyword, ".h5")
 
+        # collect clump info fields
         clump_info = dict([(ci.name, []) for ci in self.base.clump_info])
         clump_info.update(
             dict([(field, []) for field in ["clump_id", "parent_id",
@@ -249,11 +251,61 @@
             else:
                 clump_info[ci] = np.array(clump_info[ci])
 
-        field_types = dict([(ci, "clump") for ci in clump_info])
+        ftypes = dict([(ci, "clump") for ci in clump_info])
+
+        # collect data fields
+        if fields is not None:
+            contour_fields = [("index", "contours_%s" % ckey)
+                              for ckey in np.unique(clump_info["contour_key"]) \
+                              if ckey != "-1"]
+
+            ptypes = []
+            field_data = {}
+            need_grid_positions = False
+            for f in self.base.data._determine_fields(fields) + contour_fields:
+                field_data[f] = self.base[f]
+                if ds.field_info[f].particle_type:
+                    if f[0] not in ptypes:
+                        ptypes.append(f[0])
+                    need_particle_positions = True
+                    ftypes[f] = f[0]
+                else:
+                    need_grid_positions = True
+                    ftypes[f] = "grid"
+
+            if len(ptypes) > 0:
+                for ax in "xyz":
+                    for ptype in ptypes:
+                        p_field = (ptype, "particle_position_%s" % ax)
+                        if p_field in ds.field_info and \
+                          p_field not in field_data:
+                            ftypes[p_field] = p_field[0]
+                            field_data[p_field] = self.base[p_field]
+            if need_grid_positions:
+                for ax in "xyz":
+                    g_field = ("index", ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        field_data[g_field] = self.base[g_field]
+                        ftypes[g_field] = "grid"
+                    g_field = ("index", "d" + ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        ftypes[g_field] = "grid"
+                        field_data[g_field] = self.base[g_field]
+
+            if self.contour_key is not None:
+                cfield = ("index", "contours_%s" % self.contour_key)
+                my_filter = self.base[cfield] == self.contour_id
+                for field in field_data:
+                    if ftypes[field] == "grid":
+                        field_data[field] = field_data[field][my_filter]
+
+        clump_info.update(field_data)
         extra_attrs = {"data_type": "yt_clump_tree",
                        "container_type": "yt_clump_tree"}
         save_as_dataset(ds, filename, clump_info,
-                        field_types=field_types,
+                        field_types=ftypes,
                         extra_attrs=extra_attrs)
 
         return filename


https://bitbucket.org/yt_analysis/yt/commits/69ba45951735/
Changeset:   69ba45951735
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 19:43:13+00:00
Summary:     Enabling particle field querying for different particle types.
Affected #:  1 file

diff -r 3b5e3571b2b8902f14d8825eb05a1181aa2ff401 -r 69ba4595173538f8686e35a5f25b8861bef43b7d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -795,7 +795,8 @@
             f = self.base_object[field]
             if f.shape != ind.shape:
                 parent = getattr(self, "parent", self.base_object)
-                self.field_data[field] = parent[field][self._part_ind]
+                self.field_data[field] = \
+                  parent[field][self._part_ind(field[0])]
             else:
                 self.field_data[field] = self.base_object[field][ind]
 
@@ -825,21 +826,23 @@
                 np.logical_and(res, ind, ind)
         return ind
 
-    _particle_mask = None
-    @property
-    def _part_ind(self):
-        if self._particle_mask is None:
+    _particle_mask = {}
+    def _part_ind(self, ptype):
+        if self._particle_mask.get(ptype) is None:
             parent = getattr(self, "parent", self.base_object)
             units = "code_length"
             mask = points_in_cells(
-                self["x"].to(units), self["y"].to(units),
-                self["z"].to(units), self["dx"].to(units),
-                self["dy"].to(units), self["dz"].to(units),
-                parent["particle_position_x"].to(units),
-                parent["particle_position_y"].to(units),
-                parent["particle_position_z"].to(units))
-            self._particle_mask = mask
-        return self._particle_mask
+                self[("index", "x")].to(units),
+                self[("index", "y")].to(units),
+                self[("index", "z")].to(units),
+                self[("index", "dx")].to(units),
+                self[("index", "dy")].to(units),
+                self[("index", "dz")].to(units),
+                parent[(ptype, "particle_position_x")].to(units),
+                parent[(ptype, "particle_position_y")].to(units),
+                parent[(ptype, "particle_position_z")].to(units))
+            self._particle_mask[ptype] = mask
+        return self._particle_mask[ptype]
 
     @property
     def icoords(self):

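The rewritten _part_ind above is a memoize-per-key pattern: the expensive
points_in_cells mask is computed once per particle type and then cached in a
dict. A stripped-down sketch of the same pattern, with a stand-in for the
real mask computation:

    class PerTypeMaskCache(object):
        """Sketch only: cache one particle mask per particle type."""
        def __init__(self):
            self._particle_mask = {}

        def _compute_mask(self, ptype):
            # stand-in for the points_in_cells call in the real code
            raise NotImplementedError

        def _part_ind(self, ptype):
            # compute lazily on first request, then reuse on later calls
            if self._particle_mask.get(ptype) is None:
                self._particle_mask[ptype] = self._compute_mask(ptype)
            return self._particle_mask[ptype]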

https://bitbucket.org/yt_analysis/yt/commits/aa07db06b9fb/
Changeset:   aa07db06b9fb
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 22:32:42+00:00
Summary:     Adding particle field filtering.
Affected #:  1 file

diff -r 69ba4595173538f8686e35a5f25b8861bef43b7d -r aa07db06b9fbb1c28b6ab4ffbd714dd65842f81b yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -255,9 +255,10 @@
 
         # collect data fields
         if fields is not None:
-            contour_fields = [("index", "contours_%s" % ckey)
-                              for ckey in np.unique(clump_info["contour_key"]) \
-                              if ckey != "-1"]
+            contour_fields = \
+              [("index", "contours_%s" % ckey)
+               for ckey in np.unique(clump_info["contour_key"]) \
+               if ckey != "-1"]
 
             ptypes = []
             field_data = {}
@@ -281,6 +282,19 @@
                           p_field not in field_data:
                             ftypes[p_field] = p_field[0]
                             field_data[p_field] = self.base[p_field]
+
+                for clump in self.twalk():
+                    if clump.contour_key is None:
+                        continue
+                    for ptype in ptypes:
+                        cfield = (ptype, "contours_%s" % clump.contour_key)
+                        if cfield not in field_data:
+                            field_data[cfield] = \
+                              -1 * clump.base.data[(ptype, "particle_ones")]
+                            ftypes[cfield] = ptype
+                        field_data[cfield][clump.data._part_ind(ptype)] = \
+                          clump.contour_id
+
             if need_grid_positions:
                 for ax in "xyz":
                     g_field = ("index", ax)
@@ -295,11 +309,16 @@
                         field_data[g_field] = self.base[g_field]
 
             if self.contour_key is not None:
-                cfield = ("index", "contours_%s" % self.contour_key)
-                my_filter = self.base[cfield] == self.contour_id
+                cfilters = {}
                 for field in field_data:
                     if ftypes[field] == "grid":
-                        field_data[field] = field_data[field][my_filter]
+                        ftype = "index"
+                    else:
+                        ftype = field[0]
+                    cfield = (ftype, "contours_%s" % self.contour_key)
+                    if cfield not in cfilters:
+                        cfilters[cfield] = field_data[cfield] == self.contour_id
+                    field_data[field] = field_data[field][cfilters[cfield]]
 
         clump_info.update(field_data)
         extra_attrs = {"data_type": "yt_clump_tree",


https://bitbucket.org/yt_analysis/yt/commits/066218717cb3/
Changeset:   066218717cb3
Branch:      yt
User:        brittonsmith
Date:        2016-08-05 07:32:24+00:00
Summary:     Adding center of mass info item.
Affected #:  1 file

diff -r aa07db06b9fbb1c28b6ab4ffbd714dd65842f81b -r 066218717cb3a21b1fcba5d2150687dfe1f00595 yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -41,7 +41,12 @@
             clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
         rv = clump.info[self.name]
         return rv[0] % rv[1]
-    
+
+def _center_of_mass(clump, units="code_length", **kwargs):
+    p = clump.quantities.center_of_mass(**kwargs)
+    return "Center of mass: %s.", p.to(units)
+add_clump_info("center_of_mass", _center_of_mass)
+
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
     return "Cells: %d.", n_cells

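Given the (format string, value) convention above, a custom info item only
needs to return such a pair and register itself. A hypothetical example that
records the peak gas density of each clump:

    from yt.analysis_modules.level_sets.clump_info_items import \
        add_clump_info

    # Hypothetical info item following the (format string, value) convention.
    def _max_density(clump):
        max_rho = clump.data["gas", "density"].max().in_units("g/cm**3")
        return "Max density: %.6e g/cm**3.", max_rho

    add_clump_info("max_density", _max_density)
    # later: master_clump.add_info_item("max_density")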

https://bitbucket.org/yt_analysis/yt/commits/a19f6a4c5e41/
Changeset:   a19f6a4c5e41
Branch:      yt
User:        brittonsmith
Date:        2016-08-05 13:47:29+00:00
Summary:     Allow fluid selection to read from different hdf5 groups.
Affected #:  1 file

diff -r 066218717cb3a21b1fcba5d2150687dfe1f00595 -r a19f6a4c5e41d7de1c6328096d5b44a0c18f2de1 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -42,14 +42,13 @@
                 rv.update(gf)
             if len(rv) == len(fields): return rv
             f = h5py.File(u(g.filename), "r")
-            gds = f["data"]
             for field in fields:
                 if field in rv:
                     self._hits += 1
                     continue
                 self._misses += 1
                 ftype, fname = field
-                rv[(ftype, fname)] = gds[fname].value
+                rv[(ftype, fname)] = f[ftype][fname].value
             if self._cache_on:
                 for gid in rv:
                     self._cached_fields.setdefault(gid, {})

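The reader change above assumes a file layout in which each field type is a
top-level HDF5 group holding one dataset per field. A minimal h5py sketch of
walking such a file (the filename is a placeholder):

    import h5py

    with h5py.File("clump_tree.h5", "r") as f:
        # each field type ("grid", "all", "clump", ...) is its own group
        for ftype in f:
            for fname in f[ftype]:
                data = f[ftype][fname][()]  # [()] reads the full dataset
                print((ftype, fname), data.shape)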

https://bitbucket.org/yt_analysis/yt/commits/2d59cd968893/
Changeset:   2d59cd968893
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 09:08:10+00:00
Summary:     Use fluid reader for particles since it's the same structure.
Affected #:  1 file

diff -r a19f6a4c5e41d7de1c6328096d5b44a0c18f2de1 -r 2d59cd968893f74796c735c08a5555e68e5eefb9 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -482,6 +482,7 @@
                 particles.append((ftype, fname))
             elif (ftype, fname) not in fluids:
                 fluids.append((ftype, fname))
+
         # The _read method will figure out which fields it needs to get from
         # disk, and return a dict of those fields along with the fields that
         # need to be generated.
@@ -491,7 +492,7 @@
             self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
             self.field_data[f].convert_to_units(finfos[f].output_units)
 
-        read_particles, gen_particles = self.index._read_particle_fields(
+        read_particles, gen_particles = self.index._read_fluid_fields(
                                         particles, self, self._current_chunk)
         for f, v in read_particles.items():
             self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)


https://bitbucket.org/yt_analysis/yt/commits/6ae336447569/
Changeset:   6ae336447569
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 12:24:38+00:00
Summary:     Adding TreeContainer and implementing in Clump class.
Affected #:  3 files

diff -r 2d59cd968893f74796c735c08a5555e68e5eefb9 -r 6ae33644756988f3339b5630ab4bccf88eb1f914 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -28,6 +28,8 @@
     mylog
 from yt.extern.six import \
     string_types
+from yt.utilities.tree_container import \
+    TreeContainer
 
 from .clump_info_items import \
     clump_info_registry
@@ -53,7 +55,7 @@
                  display_field=False,
                  units='')
 
-class Clump(object):
+class Clump(TreeContainer):
     children = None
     def __init__(self, data, field, parent=None,
                  clump_info=None, validators=None,
@@ -183,12 +185,12 @@
                                        contour_key=contour_key,
                                        contour_id=cid))
 
-    def twalk(self):
+    def __iter__(self):
         yield self
         if self.children is None:
             return
         for child in self.children:
-            for a_node in child.twalk():
+            for a_node in child:
                 yield a_node
 
     def save_as_dataset(self, filename=None, fields=None):
@@ -227,7 +229,7 @@
         clump_info.update(
             dict([(field, []) for field in ["clump_id", "parent_id",
                                             "contour_key", "contour_id"]]))
-        for clump in self.twalk():
+        for clump in self:
             clump_info["clump_id"].append(clump.clump_id)
             if clump.parent is None:
                 parent_id = -1
@@ -283,7 +285,7 @@
                             ftypes[p_field] = p_field[0]
                             field_data[p_field] = self.base[p_field]
 
-                for clump in self.twalk():
+                for clump in self:
                     if clump.contour_key is None:
                         continue
                     for ptype in ptypes:

diff -r 2d59cd968893f74796c735c08a5555e68e5eefb9 -r 6ae33644756988f3339b5630ab4bccf88eb1f914 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -27,6 +27,8 @@
     YTDataContainerFieldInfo, \
     YTGridFieldInfo
 
+from yt.data_objects.data_containers import \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.data_objects.particle_unions import \
@@ -565,7 +567,7 @@
 
     def _setup_classes(self):
         # We don't allow geometric selection for non-spatial datasets
-        pass
+        self.objects = []
 
     @parallel_root_only
     def print_key_parameters(self):

diff -r 2d59cd968893f74796c735c08a5555e68e5eefb9 -r 6ae33644756988f3339b5630ab4bccf88eb1f914 yt/utilities/tree_container.py
--- /dev/null
+++ b/yt/utilities/tree_container.py
@@ -0,0 +1,33 @@
+"""
+TreeContainer class and member functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class TreeContainer(object):
+    r"""A recursive data container for like merger trees and
+    clump-finder trees.
+
+    """
+    _child_attr = "children"
+
+    def __init__(self):
+        setattr(self, self._child_attr, None)
+
+    def __iter__(self):
+        yield self
+        children = getattr(self, self._child_attr)
+        if children is None:
+            return
+        for child in children:
+            for a_node in child:
+                yield a_node

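Because TreeContainer drives iteration purely through the attribute named by
_child_attr, any subclass that populates its children gets depth-first
(pre-order) traversal for free. A quick sketch:

    from yt.utilities.tree_container import TreeContainer

    class Node(TreeContainer):
        def __init__(self, name):
            TreeContainer.__init__(self)  # sets self.children = None
            self.name = name

    root = Node("root")
    root.children = [Node("a"), Node("b")]
    print([node.name for node in root])  # ['root', 'a', 'b']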

https://bitbucket.org/yt_analysis/yt/commits/ec36b1fab498/
Changeset:   ec36b1fab498
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 13:47:51+00:00
Summary:     Adding Clump dataset and container and allowing for non-float fields in nonspatial datasets.
Affected #:  1 file

diff -r 6ae33644756988f3339b5630ab4bccf88eb1f914 -r ec36b1fab498f53042934e6098097b2edb57037b yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -58,6 +58,8 @@
     _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.utilities.tree_container import \
+    TreeContainer
 from yt.fields.field_exceptions import \
     NeedsGridType
 from yt.data_objects.data_containers import \
@@ -491,14 +493,30 @@
         read_fluids, gen_fluids = self.index._read_fluid_fields(
                                         fluids, self, self._current_chunk)
         for f, v in read_fluids.items():
-            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
-            self.field_data[f].convert_to_units(finfos[f].output_units)
+            convert = True
+            if v.dtype != np.float64:
+                if finfos[f].units == "":
+                    self.field_data[f] = v
+                    convert = False
+                else:
+                    v = v.astype(np.float64)
+            if convert:
+                self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+                self.field_data[f].convert_to_units(finfos[f].output_units)
 
         read_particles, gen_particles = self.index._read_fluid_fields(
                                         particles, self, self._current_chunk)
         for f, v in read_particles.items():
-            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
-            self.field_data[f].convert_to_units(finfos[f].output_units)
+            convert = True
+            if v.dtype != np.float64:
+                if finfos[f].units == "":
+                    self.field_data[f] = v
+                    convert = False
+                else:
+                    v = v.astype(np.float64)
+            if convert:
+                self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+                self.field_data[f].convert_to_units(finfos[f].output_units)
 
         fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
@@ -683,3 +701,64 @@
             if data_type == "yt_profile":
                 return True
         return False
+
+class YTClumpContainer(TreeContainer):
+    def __init__(self, clump_id, global_id, parent_id,
+                 contour_key, contour_id, ds=None):
+        self.clump_id = clump_id
+        self.global_id = global_id
+        self.parent_id = parent_id
+        self.contour_key = contour_key
+        self.contour_id = contour_id
+        self.ds = ds
+        TreeContainer.__init__(self)
+
+    def add_child(self, child):
+        if self.children is None:
+            self.children = []
+        self.children.append(child)
+
+    def __repr__(self):
+        return "Clump[%d]" % self.clump_id
+
+    def __getitem__(self, field):
+        g = self.ds.data
+        f = g._determine_fields(field)[0]
+        if f[0] == "clump":
+            return g[f][self.global_id]
+        if self.contour_id == -1:
+            return g[f]
+        cfield = (f[0], "contours_%s" % self.contour_key)
+        return g[f][g[cfield] == self.contour_id]
+
+class YTClumpTreeDataset(YTNonspatialDataset):
+    """Dataset for saved clump-finder data."""
+    def __init__(self, filename, unit_system="cgs"):
+        super(YTClumpTreeDataset, self).__init__(filename,
+                                                 unit_system=unit_system)
+        self._load_tree()
+
+    def _load_tree(self):
+        my_tree = {}
+        for i, clump_id in enumerate(self.data[("clump", "clump_id")]):
+            my_tree[clump_id] = YTClumpContainer(
+                clump_id, i, self.data["clump", "parent_id"][i],
+                self.data["clump", "contour_key"][i],
+                self.data["clump", "contour_id"][i], self)
+        for clump in my_tree.values():
+            if clump.parent_id == -1:
+                self.tree = clump
+            else:
+                parent = my_tree[clump.parent_id]
+                parent.add_child(clump)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = parse_h5_attr(f, "data_type")
+            if data_type is None:
+                return False
+            if data_type == "yt_clump_tree":
+                return True
+        return False

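Once a file carries data_type == "yt_clump_tree", yt.load dispatches to
YTClumpTreeDataset via _is_valid, rebuilds the hierarchy from the parent_id
column, and exposes the root clump as ds.tree. A short usage sketch (the
filename is a placeholder):

    import yt

    ds = yt.load("clump_tree.h5")  # loads as a YTClumpTreeDataset
    print(ds.tree)                 # repr is "Clump[<clump_id>]"
    for clump in ds.tree:          # depth-first walk via TreeContainer
        print(clump, clump["clump", "clump_id"])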

https://bitbucket.org/yt_analysis/yt/commits/110d53d78643/
Changeset:   110d53d78643
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 15:27:54+00:00
Summary:     Adding leaves attribute.
Affected #:  1 file

diff -r ec36b1fab498f53042934e6098097b2edb57037b -r 110d53d786434d56c2e92b9f0f15edfce57fbdcd yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -752,6 +752,16 @@
                 parent = my_tree[clump.parent_id]
                 parent.add_child(clump)
 
+    _leaves = None
+    @property
+    def leaves(self):
+        if self._leaves is None:
+            self._leaves = []
+            for clump in self.tree:
+                if clump.children is None:
+                    self._leaves.append(clump)
+        return self._leaves
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False

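Continuing the sketch above, the new property makes the childless clumps
directly accessible (assuming the cell_mass info item was saved):

    # leaves are collected lazily on first access and then cached
    for leaf in ds.leaves:
        print(leaf["clump", "cell_mass"])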

https://bitbucket.org/yt_analysis/yt/commits/b7ea9528640f/
Changeset:   b7ea9528640f
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 15:45:13+00:00
Summary:     Adding to docstring.
Affected #:  1 file

diff -r 110d53d786434d56c2e92b9f0f15edfce57fbdcd -r b7ea9528640f6c8c48622ff1d25f22e3c0c62acc yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -218,6 +218,41 @@
         Examples
         --------
 
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.analysis_modules.level_sets.api import \
+        ...         Clump, find_clumps
+        >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+        ...                       (8, 'kpc'), (1, 'kpc'))
+        >>> field = ("gas", "density")
+        >>> step = 2.0
+        >>> c_min = 10**np.floor(np.log10(data_source[field]).min())
+        >>> c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
+        >>> master_clump = Clump(data_source, field)
+        >>> master_clump.add_info_item("center_of_mass")
+        >>> master_clump.add_validator("min_cells", 20)
+        >>> find_clumps(master_clump, c_min, c_max, step)
+        >>> fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+        >>> new_ds = yt.load(fn)
+        >>> print(new_ds.tree["clump", "cell_mass"])
+        1296926163.91 Msun
+        >>> print(new_ds.tree["grid", "density"])
+        [  2.54398434e-26   2.46620353e-26   2.25120154e-26 ...,   1.12879234e-25
+           1.59561490e-25   1.09824903e-24] g/cm**3
+        >>> print(new_ds.tree["all", "particle_mass"])
+        [  4.25472446e+38   4.25472446e+38   4.25472446e+38 ...,   2.04238266e+38
+           2.04523901e+38   2.04770938e+38] g
+        >>> print(new_ds.tree.children[0]["clump", "cell_mass"])
+        909636495.312 Msun
+        >>> print(new_ds.leaves[0]["clump", "cell_mass"])
+        3756566.99809 Msun
+        >>> print(new_ds.leaves[0]["grid", "density"])
+        [  6.97820274e-24   6.58117370e-24   7.32046082e-24   6.76202430e-24
+           7.41184837e-24   6.76981480e-24   6.94287213e-24   6.56149658e-24
+           6.76584569e-24   6.94073710e-24   7.06713082e-24   7.22556526e-24
+           7.08338898e-24   6.78684331e-24   7.40647040e-24   7.03050456e-24
+           7.12438678e-24   6.56310217e-24   7.23201662e-24   7.17314333e-24] g/cm**3
+
         """
 
         ds = self.data.ds


https://bitbucket.org/yt_analysis/yt/commits/e21f27cc1dbb/
Changeset:   e21f27cc1dbb
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 15:55:55+00:00
Summary:     Add deprecation warnings on old functions.
Affected #:  1 file

diff -r b7ea9528640f6c8c48622ff1d25f22e3c0c62acc -r e21f27cc1dbba0bb80fa549939fff684cfae6a87 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -23,6 +23,7 @@
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
 from yt.funcs import \
+    deprecate, \
     get_output_filename, \
     iterable, \
     mylog
@@ -474,6 +475,7 @@
 
     return clump_list
 
+ at deprecate("Clump.save_as_dataset")
 def write_clump_index(clump, level, fh):
     top = False
     if isinstance(fh, string_types):
@@ -491,6 +493,7 @@
     if top:
         fh.close()
 
+ at deprecate("Clump.save_as_dataset")
 def write_clumps(clump, level, fh):
     top = False
     if isinstance(fh, string_types):

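The deprecate decorator imported above lets the old writers keep working
while steering users toward the replacement named in its argument. A generic
sketch of that pattern (not yt's actual implementation):

    import functools
    import warnings

    def deprecate(replacement):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                warnings.warn("%s is deprecated; use %s instead."
                              % (func.__name__, replacement),
                              DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return wrapper
        return decorator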

https://bitbucket.org/yt_analysis/yt/commits/94ce7e1464b7/
Changeset:   94ce7e1464b7
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 20:37:17+00:00
Summary:     Removing old hdf5 writers.
Affected #:  1 file

diff -r e21f27cc1dbba0bb80fa549939fff684cfae6a87 -r 94ce7e1464b7086ee8a1b0f91b7a402342f53971 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -509,46 +509,3 @@
             write_clumps(child, 0, fh)
     if top:
         fh.close()
-
-def write_clump_index_h5(clump, level, fh):
-    print level
-    top = False
-    if not isinstance(fh, h5py.File) and \
-      not isinstance(fh, h5py.Group):
-        fh = h5py.File(fh, "w")
-        top = True
-    for item in clump.clump_info:
-        item(clump)
-        my_info = clump.info[item.name][1]
-        fh.attrs[item.name] = my_info
-        if hasattr(my_info, "units") and \
-          "dimensionless" not in str(my_info.units):
-            units = str(my_info.units)
-        else:
-            units = ""
-        fh.attrs["%s_units" % item.name] = units
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        i = 0
-        for child in clump.children:
-            my_group = fh.create_group("child_%04d" % i)
-            write_clump_index_h5(child, (level+1), my_group)
-            i += 1
-    if top:
-        fh.close()
-        
-def write_clumps_h5(clump, filename):
-    clump_list = get_lowest_clumps(clump)
-    fh = h5py.File(filename, "w")
-    for item in clump.clump_info:
-        quantity = []
-        for my_clump in clump_list:
-            item(my_clump)
-            quantity.append(my_clump.info[item.name][1])
-        quantity = clump.data.ds.arr(quantity)
-        dataset = fh.create_dataset(item.name, data=quantity)
-        if "dimensionless" in str(quantity.units):
-            units = ""
-        else:
-            units = str(quantity.units)
-        dataset.attrs["units"] = units
-    fh.close()


https://bitbucket.org/yt_analysis/yt/commits/be6c4a73619d/
Changeset:   be6c4a73619d
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 20:50:52+00:00
Summary:     Updating cookbook recipe.
Affected #:  1 file

diff -r 94ce7e1464b7086ee8a1b0f91b7a402342f53971 -r be6c4a73619d42b42a8b039453267a64a939cb58 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -27,14 +27,14 @@
 # As many validators can be added as you want.
 master_clump.add_validator("min_cells", 20)
 
+# Calculate center of mass for all clumps.
+master_clump.add_info_item("center_of_mass")
+
 # Begin clump finding.
 find_clumps(master_clump, c_min, c_max, step)
 
-# Write out the full clump hierarchy.
-write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-# Write out only the leaf nodes of the hierarchy.
-write_clumps(master_clump,0, "%s_clumps.txt" % ds)
+# Save the clump tree as a reloadable dataset.
+fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
 
 # We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
@@ -46,5 +46,17 @@
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
-# Lastly, we write the plot to disk.
+# Save the plot to disk.
 prj.save('clumps')
+
+# Reload the clump dataset.
+cds = yt.load(fn)
+
+# Query fields for clumps in the tree.
+print (cds.tree["clump", "center_of_mass"])
+print (cds.tree.children[0]["grid", "density"])
+print (cds.tree.children[1]["all", "particle_mass"])
+
+# Get all of the leaf clumps.
+print (cds.leaves)
+print (cds.leaves[0]["clump", "cell_mass"])


https://bitbucket.org/yt_analysis/yt/commits/367fbb63e3bc/
Changeset:   367fbb63e3bc
Branch:      yt
User:        brittonsmith
Date:        2016-08-06 20:56:12+00:00
Summary:     Removing imports.
Affected #:  1 file

diff -r be6c4a73619d42b42a8b039453267a64a939cb58 -r 367fbb63e3bcc1e00564343cb473fe791f211c21 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import copy
-import h5py
 import numpy as np
 import uuid
 


https://bitbucket.org/yt_analysis/yt/commits/2f11cc09dbe7/
Changeset:   2f11cc09dbe7
Branch:      yt
User:        brittonsmith
Date:        2016-08-07 07:00:47+00:00
Summary:     Removing unused imports and variables.
Affected #:  2 files

diff -r 367fbb63e3bcc1e00564343cb473fe791f211c21 -r 2f11cc09dbe7af88e02e095f3bcce63ec5170849 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -304,7 +304,6 @@
                 if ds.field_info[f].particle_type:
                     if f[0] not in ptypes:
                         ptypes.append(f[0])
-                    need_particle_positions = True
                     ftypes[f] = f[0]
                 else:
                     need_grid_positions = True

diff -r 367fbb63e3bcc1e00564343cb473fe791f211c21 -r 2f11cc09dbe7af88e02e095f3bcce63ec5170849 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -27,8 +27,6 @@
     YTDataContainerFieldInfo, \
     YTGridFieldInfo
 
-from yt.data_objects.data_containers import \
-    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.data_objects.particle_unions import \


https://bitbucket.org/yt_analysis/yt/commits/f3716bc37efa/
Changeset:   f3716bc37efa
Branch:      yt
User:        brittonsmith
Date:        2016-08-07 07:45:17+00:00
Summary:     Initialize particle mask for each instance.
Affected #:  1 file

diff -r 2f11cc09dbe7af88e02e095f3bcce63ec5170849 -r f3716bc37efa4c095a22d03b89278d0ec3bb99ae yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -773,6 +773,7 @@
         self.conditionals = ensure_list(conditionals)
         self.base_object = data_source
         self._selector = None
+        self._particle_mask = {}
         # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
         # ires and get_data
 
@@ -826,7 +827,6 @@
                 np.logical_and(res, ind, ind)
         return ind
 
-    _particle_mask = {}
     def _part_ind(self, ptype):
         if self._particle_mask.get(ptype) is None:
             parent = getattr(self, "parent", self.base_object)


https://bitbucket.org/yt_analysis/yt/commits/76d4d2fffec2/
Changeset:   76d4d2fffec2
Branch:      yt
User:        brittonsmith
Date:        2016-08-07 11:23:46+00:00
Summary:     Python3 fixes.
Affected #:  2 files

diff -r f3716bc37efa4c095a22d03b89278d0ec3bb99ae -r 76d4d2fffec23b0b19709368c28529b33f10c449 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -726,7 +726,7 @@
             return g[f][self.global_id]
         if self.contour_id == -1:
             return g[f]
-        cfield = (f[0], "contours_%s" % self.contour_key)
+        cfield = (f[0], "contours_%s" % self.contour_key.decode('utf-8'))
         return g[f][g[cfield] == self.contour_id]
 
 class YTClumpTreeDataset(YTNonspatialDataset):

diff -r f3716bc37efa4c095a22d03b89278d0ec3bb99ae -r 76d4d2fffec23b0b19709368c28529b33f10c449 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -136,6 +136,9 @@
             field_name = field[1]
         else:
             field_name = field
+        # thanks, python3
+        if data[field].dtype.kind == 'U':
+            data[field] = data[field].astype('|S40')
         _yt_array_hdf5(fh[field_type], field_name, data[field])
         if "num_elements" not in fh[field_type].attrs:
             fh[field_type].attrs["num_elements"] = data[field].size

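The dtype check above works around h5py's refusal to store numpy unicode
('U') arrays: converting to fixed-width bytes makes the column storable. A
small demonstration:

    import numpy as np

    contour_keys = np.array(["1", "23", "-1"])  # dtype kind 'U' on Python 3
    if contour_keys.dtype.kind == "U":
        contour_keys = contour_keys.astype("|S40")  # fixed-width bytes
    print(contour_keys)  # [b'1' b'23' b'-1']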

https://bitbucket.org/yt_analysis/yt/commits/44449bb04283/
Changeset:   44449bb04283
Branch:      yt
User:        brittonsmith
Date:        2016-08-07 18:22:07+00:00
Summary:     Fixing particle field saving/querying.
Affected #:  2 files

diff -r 76d4d2fffec23b0b19709368c28529b33f10c449 -r 44449bb0428362428fdb61f20a20eabe672529fe yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -325,7 +325,7 @@
                         cfield = (ptype, "contours_%s" % clump.contour_key)
                         if cfield not in field_data:
                             field_data[cfield] = \
-                              -1 * clump.base.data[(ptype, "particle_ones")]
+                              clump.data._part_ind(ptype).astype(np.int64)
                             ftypes[cfield] = ptype
                         field_data[cfield][clump.data._part_ind(ptype)] = \
                           clump.contour_id

diff -r 76d4d2fffec23b0b19709368c28529b33f10c449 -r 44449bb0428362428fdb61f20a20eabe672529fe yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -708,6 +708,7 @@
         self.parent_id = parent_id
         self.contour_key = contour_key
         self.contour_id = contour_id
+        self.parent = None
         self.ds = ds
         TreeContainer.__init__(self)
 
@@ -715,6 +716,7 @@
         if self.children is None:
             self.children = []
         self.children.append(child)
+        child.parent = self
 
     def __repr__(self):
         return "Clump[%d]" % self.clump_id
@@ -727,7 +729,9 @@
         if self.contour_id == -1:
             return g[f]
         cfield = (f[0], "contours_%s" % self.contour_key.decode('utf-8'))
-        return g[f][g[cfield] == self.contour_id]
+        if f[0] == "grid":
+            return g[f][g[cfield] == self.contour_id]
+        return self.parent[f][g[cfield] == self.contour_id]
 
 class YTClumpTreeDataset(YTNonspatialDataset):
     """Dataset for saved clump-finder data."""


https://bitbucket.org/yt_analysis/yt/commits/6d9ef8030186/
Changeset:   6d9ef8030186
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 15:58:54+00:00
Summary:     Merging.
Affected #:  60 files

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command to make sure you are on
+the "yt" branch, and not another one like "stable" (this will be important
+later when you want to submit pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        ind = next((i for i, val in enumerate(data)
+                    if re.match('\s{0,3}\{.*\}\s*$', val)))
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -115,7 +116,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
@@ -484,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -510,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -528,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -557,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -1411,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1436,10 +1449,10 @@
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1494,7 +1507,7 @@
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, add the config parameter ``test_data_dir`` pointing to the
+directory with the test data you want to test with, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -388,10 +388,10 @@
 To make things easier to load these sample datasets, you can add the parent
 directory to your downloaded sample data to your *yt path*.
 If you set the option ``test_data_dir``, in the section ``[yt]``,
-in ``~/.yt/config``, yt will search this path for them.
+in ``~/.config/yt/ytrc``, yt will search this path for them.
 
 This means you can download these datasets to ``/big_drive/data_for_yt`` , add
-the appropriate item to ``~/.yt/config``, and no matter which directory you are
+the appropriate item to ``~/.config/yt/ytrc``, and no matter which directory you are
 in when running yt, it will also check in *that* directory.
 
 
@@ -437,12 +437,11 @@
 hand, you may want it to output a lot more, since you can't figure out exactly what's going
 wrong, and you want to output some debugging information. The yt log level can be
 changed using the :ref:`configuration-file`, either by setting it in the
-``$HOME/.yt/config`` file:
+``$HOME/.config/yt/ytrc`` file:
 
 .. code-block:: bash
 
-   [yt]
-   loglevel = 10 # This sets the log level to "DEBUG"
+   $ yt config set yt loglevel 10  # This sets the log level to "DEBUG"
 
 which would produce debug (as well as info, warning, and error) messages, or at runtime:
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -133,6 +133,16 @@
      <tr valign="top"><td width="25%"><p>
+           <a href="sharing_data.html">Sharing Data</a>
+         </p>
+       </td>
+       <td width="75%">
+         <p class="linkdescr">The yt Hub</p>
+       </td>
+     </tr>
+     <tr valign="top">
+       <td width="25%">
+         <p><a href="reference/index.html">Reference Materials</a></p></td>
@@ -185,6 +195,7 @@
    analyzing/analysis_modules/index
    examining/index
    developing/index
+   sharing_data
    reference/index
    faq/index
    Getting Help <help/index>

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -54,35 +54,7 @@
 
 This will print the list of available subcommands,
 
-.. code-block:: bash
-
-    help                Print help message
-    bootstrap_dev       Bootstrap a yt development environment
-    bugreport           Report a bug in yt
-    hub_register        Register a user on the Hub: http://hub.yt-project.org/
-    hub_submit          Submit a mercurial repository to the yt Hub
-                        (http://hub.yt-project.org/), creating a BitBucket
-                        repo in the process if necessary.
-    instinfo            Get some information about the yt installation
-    version             Get some information about the yt installation (this
-                        is an alias for instinfo).
-    load                Load a single dataset into an IPython instance
-    mapserver           Serve a plot in a GMaps-style interface
-    pastebin            Post a script to an anonymous pastebin
-    pastebin_grab       Print an online pastebin to STDOUT for local use.
-    upload_notebook     Upload an IPython notebook to hub.yt-project.org.
-    plot                Create a set of images
-    rpdb                Connect to a currently running (on localhost) rpd
-                        session. Commands run with --rpdb will trigger an rpdb
-                        session with any uncaught exceptions.
-    notebook            Run the IPython Notebook
-    stats               Print stats and max/min value of a given field (if
-                        requested), for one or more datasets (default field is
-                        Density)
-    update              Update the yt installation to the most recent version
-    delete_image        Delete image from imgur.com.
-    upload_image        Upload an image to imgur.com. Must be PNG.
-
+.. config_help:: yt
 
 To execute any such function, simply run:
 
@@ -217,13 +189,12 @@
 
 This command will accept the filename of a ``.ipynb`` file (generated from an
 IPython notebook session) and upload it to the `yt hub
-<http://hub.yt-project.org/>` where others will be able to view it, and
+<https://hub.yt/>`__ where others will be able to view it, and
 download it.  This is an easy method for recording a sequence of commands,
 their output, narrative information, and then sharing that with others.  These
 notebooks will be viewable online, and the appropriate URLs will be returned on
 the command line.
 
-
 rpdb
 ++++
 
@@ -272,3 +243,95 @@
 The image uploaded using ``upload_image`` is assigned with a unique hash that
 can be used to remove it. This subcommand provides an easy way to send a delete
 request directly to the `imgur.com <http://imgur.com/>`_.
+
+Hub helper
+~~~~~~~~~~
+
+The :code:`yt hub` command-line tool allows you to interact with the `yt hub
+<https://hub.yt>`__. The following subcommands are currently available:
+
+.. config_help:: yt hub
+
+register
+++++++++
+
+This subcommand starts an interactive process of creating an account on the `yt
+hub <https://hub.yt/>`__. Please note that the yt Hub also supports multiple OAuth
+providers such as Google, Bitbucket and GitHub for authentication. 
+See :ref:`hub-APIkey` for more information.
+
+start
++++++
+
+This subcommand launches the Jupyter Notebook on the `yt Hub <https://hub.yt>`__
+with a chosen Hub folder mounted to the ``/data`` directory inside the notebook.
+If no path is given, all the `example yt datasets
+<https://yt-project.org/data>`_ are mounted by default. A URL for accessing
+the Notebook will be returned on the command line.
+
+Example:
+
+.. code-block:: bash
+
+   $ yt hub start
+   $ yt hub start /user/xarthisius/Public
+
+
+Config helper
+~~~~~~~~~~~~~
+
+The :code:`yt config` command-line tool allows you to modify and access yt's
+configuration without manually locating and opening the config file in an editor.
+To get a quick list of available commands, just type:
+
+.. code-block:: bash
+
+   yt config -h
+
+This will print the list of available subcommands:
+
+.. config_help:: yt config
+
+Since yt version 3.3.2, the previous location of the configuration file
+(``$HOME/.yt/config``) has been deprecated in favor of a location adhering to the
+`XDG Base Directory Specification
+<https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_
+(``$XDG_CONFIG_HOME/yt/ytrc``). To perform an automatic migration of
+the old config, you are encouraged to run:
+
+.. code-block:: bash
+
+   yt config migrate
+
+This will copy your current config file to the new location and store a backup
+copy as ``$HOME/.yt/config.bak``.
+
+Examples
+++++++++
+
+Listing current content of the config file:
+
+.. code-block:: bash
+
+   $ yt config list
+   [yt]
+   loglevel = 50
+
+Obtaining a single config value by name:
+
+.. code-block:: bash
+
+   $ yt config get yt loglevel
+   50
+
+Changing a single config value:
+
+.. code-block:: bash
+
+   $ yt config set yt loglevel 10
+
+Removing a single config entry:
+
+.. code-block:: bash
+
+   $ yt config rm yt loglevel

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -18,9 +18,9 @@
 Configuration File Format
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/config`` as a configuration
+yt will look for and recognize the file ``$HOME/.config/yt/ytrc`` as a configuration
 file, containing several options that can be modified and adjusted to control
-runtime behavior.  For example, a sample ``$HOME/.yt/config`` file could look
+runtime behavior.  For example, a sample ``$HOME/.config/yt/ytrc`` file could look
 like:
 
 .. code-block:: none
@@ -31,7 +31,17 @@
 
 This configuration file would set the logging threshold much lower, enabling
 much more voluminous output from yt.  Additionally, it increases the number of
-datasets tracked between instantiations of yt.
+datasets tracked between instantiations of yt. The configuration file can be
+managed using the ``yt config`` helper. It can list, add, modify and remove
+options from the configuration file, e.g.:
+
+.. code-block:: none
+
+   $ yt config -h
+   $ yt config list
+   $ yt config set yt loglevel 1
+   $ yt config rm yt maximumstoreddatasets
+
 
 Configuration Options At Runtime
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/sharing_data.rst
--- /dev/null
+++ b/doc/source/sharing_data.rst
@@ -0,0 +1,117 @@
+.. _sharing-data:
+
+The yt Hub
+==========
+
+.. contents::
+   :depth: 2
+   :local:
+   :backlinks: none
+
+What is the yt Hub?
+-------------------
+
+The yt data Hub is a mechanism by which images, data objects, and projects can
+be shared with other people. For instance, one can upload a dataset and allow
+other people to analyze it remotely with a Jupyter notebook, or upload
+notebooks and view them from any web browser.
+
+.. note:: All items posted on the hub are public!
+
+Over time, more widgets will be added, and more datatypes will be supported
+for upload.  If you are interested in adding more ways of sharing data, please
+email the developers' list.  We would like to add support for 3D widgets such
+as isocontours, as well as interactive binning and rebinning of data from yt
+data objects, to be displayed as phase plots and profiles.
+
+.. note:: Working with the Hub requires additional dependencies to be installed.
+          You can obtain them by running: ``pip install yt[hub]``. 
+
+.. _hub-APIkey:
+
+Obtaining an API key
+--------------------
+
+In order to interact with the yt Hub, you need to obtain an API key, which is
+available only to authenticated users. You can `log into
+<https://girder.hub.yt/#?dialog=login>`_ the Hub using your Google, GitHub, or
+Bitbucket account. After you log in, an API key can be generated under the *My
+account* page, which can be accessed through the dropdown menu in the upper
+right corner.
+
+.. image:: _static/apiKey01.jpg
+   :width: 50 %
+
+Select the *API keys* tab and press the *Create new key* button:
+
+.. image:: _static/apiKey02.jpg
+   :width: 50 %
+
+By convention, the *Name* field of an API key is used to indicate, in a
+human-readable way, which application is making use of the key (e.g. ``yt
+command``), although you may name your key however you want.
+
+.. image:: _static/apiKey03.jpg
+   :width: 50 %
+
+After the API key is created, you can obtain it by clicking the *show* link:
+
+.. image:: _static/apiKey04.jpg
+   :width: 50 %
+
+For more information about API keys, please see `this document
+<http://girder.readthedocs.io/en/latest/user-guide.html?highlight=API%20keys#api-keys>`__.
+
+After you have gotten your API key, update your config file:
+
+.. code-block:: none
+
+   $ yt config set yt hub_api_key 3fd1de56c2114c13a2de4dd51g10974b
+
+Replace ``3fd1de56c2114c13a2de4dd51g10974b`` with your API key.
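+
+If you prefer to do this from Python, the same setting can be written with the
+``set_config`` helper that the ``yt`` command uses internally (a minimal
+sketch, reusing the placeholder key from above):
+
+.. code-block:: python
+
+   from yt.utilities.configure import set_config
+
+   # Persist the API key to yt's configuration file.
+   set_config("yt", "hub_api_key", "3fd1de56c2114c13a2de4dd51g10974b")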
+
+Registering a User
+^^^^^^^^^^^^^^^^^^
+
+If you do not wish to use OAuth authentication, you can create a Hub account
+using the ``yt`` command. To register a user:
+
+.. code-block:: bash
+
+   $ yt hub register
+
+This will walk you through the process of registering. You will need to supply
+a name, a username, a password, and an email address. Apart from creating a new
+user account, it will also generate an API key and append it to yt's config
+file.  At this point, you're ready to go!
+
+What Can Be Uploaded
+--------------------
+
+Currently, the yt Hub can accept these types of data:
+
+ * Raw data files and scripts.
+ * IPython notebooks: these are stored on the hub and are made available for
+   download and via the IPython `nbviewer <http://nbviewer.ipython.org/>`_
+   service.
+
+How to Upload Data
+------------------
+
+Uploading data can be performed using the ``girder-cli`` command-line tool or
+directly via the web interface. Please refer to the ``girder-cli``
+`documentation page
+<http://girder.readthedocs.io/en/latest/python-client.html>`_ for additional
+information.
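+
+For scripted uploads, the same operations are available from Python via the
+``girder_client`` package, which is what the ``yt`` command uses internally
+(a minimal sketch; the file name is illustrative):
+
+.. code-block:: python
+
+   import girder_client
+
+   # Authenticate against the yt Hub API with the key from your config file.
+   gc = girder_client.GirderClient(apiUrl="https://girder.hub.yt/api/v1")
+   gc.authenticate(apiKey="your_api_key_here")
+
+   # Upload a local file into your public Hub folder.
+   username = gc.get("/user/me")["login"]
+   gc.upload("my_script.py", "/user/{}/Public".format(username))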
+
+Uploading Notebooks
+^^^^^^^^^^^^^^^^^^^
+
+Notebooks can be uploaded from the bash command line:
+
+.. code-block:: bash
+
+   yt upload_notebook notebook_file.ipynb
+
+After the notebook has finished uploading, yt will print a link to the raw
+notebook as well as an nbviewer link to the same notebook.  Your notebooks will
+be stored under your Hub Public directory.

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -278,17 +278,17 @@
 Overplot Cell Edges
 ~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_cell_edges(line_width=1.0, alpha = 1.0,
-                                  color = (0.0, 0.0, 0.0))
+.. function:: annotate_cell_edges(line_width=0.002, alpha=1.0, color='black')
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.CellEdgesCallback`.)
 
-    Annotate the edges of cells, where the ``line_width`` in pixels is specified.
-    The ``alpha`` of the overlaid image and the ``color`` of the lines are also
-    specifiable.  Note that because the lines are drawn from both sides of a
-    cell, the image sometimes has the effect of doubling the line width.
-    Color here is in RGB float values (0 to 1).
+    Annotate the edges of cells, where the ``line_width`` relative to size of
+    the longest plot axis is specified.  The ``alpha`` of the overlaid image and
+    the ``color`` of the lines are also specifiable.  Note that because the
+    lines are drawn from both sides of a cell, the image sometimes has the
+    effect of doubling the line width.  Color here is a matplotlib color name or
+    a 3-tuple of RGB float values.
 
 .. python-script::
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -17,6 +17,24 @@
 plots of slices, projections, 1D profiles, and 2D profiles (phase plots), all of
 which are described below.
 
+.. _viewing-plots:
+
+Viewing Plots
+-------------
+
+yt uses an environment-neutral plotting mechanism that detects the appropriate
+matplotlib configuration for a given environment; however, it defaults to a
+basic renderer. To enable interactive plots in matplotlib-supported
+environments (Qt, GTK, WX, etc.), simply call the ``toggle_interactivity()``
+function. Below is an example in a Jupyter notebook environment, but the same
+command should work in other environments as well:
+
+.. code-block:: python
+ 
+   %matplotlib notebook
+   import yt
+   yt.toggle_interactivity()
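+
+The current interactivity state can be queried with ``get_interactivity``,
+also added in this change (a small sketch; the function lives in ``yt.funcs``
+and is not exported at the top level):
+
+.. code-block:: python
+
+   from yt.funcs import get_interactivity
+
+   print(get_interactivity())  # True once interactivity has been toggled on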
+
 .. _simple-inspection:
 
 Slices & Projections
@@ -519,6 +537,27 @@
    slc.set_center((0.5, 0.503))
    slc.save()
 
+Flipping the plot view axes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+By default, all :class:`~yt.visualization.plot_window.PlotWindow` objects plot
+with the assumption that the eastern direction on the plot forms a right-handed
+coordinate system with the ``normal`` and ``north_vector`` for the system, whether
+explicitly or implicitly defined. This setting can be toggled or explicitly defined
+by the user at initialization:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   # slicing with non-right-handed coordinates
+   slc = yt.SlicePlot(ds, 'x', 'velocity_x', right_handed=False)
+   slc.annotate_title('Not Right Handed')
+   slc.save("NotRightHanded.png")
+
+   # switching to right-handed coordinates
+   slc.toggle_right_handed()
+   slc.annotate_title('Right Handed')
+   slc.save("Standard.png")
 
 .. _hiding-colorbar-and-axes:
 
@@ -686,6 +725,7 @@
    slc.set_cbar_minorticks('all', 'off')
    slc.save()
 
+
 .. _matplotlib-customization:
 
 Further customization via matplotlib

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -105,7 +105,7 @@
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your
-``~/.yt/config`` file under the heading ``[yt]`` as the variable
+``~/.config/yt/ytrc`` file under the heading ``[yt]`` as the variable
 ``sketchfab_api_key``.  If you don't want to do this, you can also supply it as
 an argument to the function ``export_sketchfab``.
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -118,7 +118,7 @@
     from yt.visualization.api import Streamlines
 
     ds = yt.load('DD1701') # Load ds
-    streamlines = Streamlines(ds, [0.5]*3)
+    streamlines = Streamlines(ds, ds.domain_center)
     streamlines.integrate_through_volume()
     stream = streamlines.path(0)
     matplotlib.pylab.semilogy(stream['t'], stream['density'], '-x')

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,40 +3,46 @@
 Unstructured Mesh Rendering
 ===========================
 
-Installation
-^^^^^^^^^^^^
+Beginning with version 3.3, yt has the ability to volume render unstructured
+mesh data like that created by finite element calculations. No additional 
+dependencies are required in order to use this feature. However, it is 
+possible to speed up the rendering operation by installing with 
+`Embree <https://embree.github.io>`_ support. Embree is a fast ray-tracing
+library from Intel that can substantially speed up the mesh rendering operation
+on large datasets. You can read about how to install yt with Embree support 
+below, or you can skip to the examples.
 
-Beginning with version 3.3, yt has the ability to volume render unstructured
-mesh data like that created by finite element calculations. In order to use
-this capability, a few additional dependencies are required. The easiest way
-to install yt with unstructured mesh support is to use conda to install the
+Optional Embree Installation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to install yt with Embree support is to use conda to install the
 most recent development version of yt from our channel:
 
 .. code-block:: bash
 
     conda install -c http://use.yt/with_conda/ yt
 
-If you want to install from source, you can use the ``get_yt.sh`` script.
-Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the
-top of the script. The ``get_yt.sh`` script can be downloaded by doing:
+Alternatively, you can install yt from source using the ``install_script.sh`` 
+script. Be sure to set the INST_CONDA, INST_YT_SOURCE, INST_EMBREE, 
+and INST_NETCDF4 flags to 1 at the top of the script. The ``install_script.sh`` 
+script can be downloaded by doing:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/get_yt.sh
+  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 and then run like so:
 
 .. code-block:: bash
 
-  bash get_yt.sh
+  bash install_script.sh
 
-Alternatively, you can install the additional dependencies by hand.
-First, `embree <https://embree.github.io>`_
-(a fast software ray-tracing library from Intel) must be installed, either
-by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page.
+Finally, you can install the additional dependencies by hand.
+First, you will need to install Embree, either by compiling from source 
+or by using one of the pre-built binaries available at Embree's 
+`downloads <https://embree.github.io/downloads.html>`_ page.
 
-Second, the python bindings for embree (called
+Second, the python bindings for Embree (called
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
+do so, first obtain a copy, e.g. by cloning the repo:
 
@@ -54,7 +60,7 @@
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-Once embree and pyembree are installed, you must rebuild yt from source in order to use
+Once Embree and pyembree are installed, you must rebuild yt from source in order to use
 the unstructured mesh rendering capability. Once again, if Embree is installed in a
 location that is not part of your default search path, you must tell yt where to find it.
 There are a number of ways to do this. One way is to again manually pass in the flags
@@ -84,20 +90,6 @@
 necessary if you installed Embree into a location that is in your default path, such
 as /usr/local.
 
-Once the pre-requisites are installed, unstructured mesh data can be rendered
-much like any other dataset. In particular, a new type of
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` object
-has been defined, called the
-:class:`~yt.visualization.volume_rendering.render_source.MeshSource`, that
-represents the unstructured mesh data that will be rendered. The user creates
-this object, and also defines a
-:class:`~yt.visualization.volume_rendering.camera.Camera`
-that specifies your viewpoint into the scene. When
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
-a set of rays are cast at the source. Each time a ray strikes the source mesh,
-the data is sampled at the intersection point at the resulting value gets
-saved into an image. See below for examples.
-
 Examples
 ^^^^^^^^
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc setup.py
--- a/setup.py
+++ b/setup.py
@@ -380,6 +380,9 @@
         'IPython',
         'cython',
     ],
+    extras_require = {
+        'hub':  ["girder_client"]
+    },
     cmdclass={'sdist': sdist, 'build_ext': build_ext, 'build_py': build_py},
     author="The yt project",
     author_email="yt-dev at lists.spacepope.org",

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_001:
+  local_pw_006:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -73,6 +73,9 @@
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
 
+  local_axialpix_001:
+    - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
+
 other_tests:
   unittests:
      - '-v'

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -93,7 +93,8 @@
     parallel_profile, \
     enable_plugins, \
     memory_checker, \
-    deprecated_class
+    deprecated_class, \
+    toggle_interactivity
 from yt.utilities.logger import ytLogger as mylog
 
 import yt.utilities.physical_constants as physical_constants

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -45,12 +45,12 @@
        lower wavelength bound in angstroms.
     lambda_max : float
        upper wavelength bound in angstroms.
-    n_lambda : float
+    n_lambda : int
        number of wavelength bins.
     """
 
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        self.n_lambda = n_lambda
+        self.n_lambda = int(n_lambda)
         # lambda, flux, and tau are wavelength, flux, and optical depth
         self.lambda_min = lambda_min
         self.lambda_max = lambda_max
@@ -301,7 +301,7 @@
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
+            pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
@@ -550,8 +550,9 @@
         """
         mylog.info("Writing spectrum to fits file: %s.", filename)
         col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
-        col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
-        cols = pyfits.ColDefs([col1, col2])
+        col2 = pyfits.Column(name='tau', format='E', array=self.tau_field)
+        col3 = pyfits.Column(name='flux', format='E', array=self.flux_field)
+        cols = pyfits.ColDefs([col1, col2, col3])
         tbhdu = pyfits.BinTableHDU.from_columns(cols)
         tbhdu.writeto(filename, clobber=True)
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -105,7 +105,7 @@
         if self.children is None: return
         for child in self.children:
             child.add_validator(validator)
-        
+
     def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,6 +16,7 @@
 #-----------------------------------------------------------------------------
 
 import os
+import warnings
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(
@@ -48,8 +49,9 @@
     test_storage_dir = '/does/not/exist',
     test_data_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://hub.yt-project.org/upload',
+    hub_url = 'https://girder.hub.yt/api/v1',
     hub_api_key = '',
+    hub_sandbox = '/collection/yt_sandbox/data',
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
@@ -67,20 +69,28 @@
     default_colormap = 'arbre',
     ray_tracing_engine = 'embree',
     )
+
+# Honor $XDG_CONFIG_HOME when set; the config lives in a 'yt' subdirectory
+# of the base config directory either way.
+CONFIG_DIR = os.path.join(
+    os.environ.get('XDG_CONFIG_HOME',
+                   os.path.join(os.path.expanduser('~'), '.config')), 'yt')
+if not os.path.exists(CONFIG_DIR):
+    os.makedirs(CONFIG_DIR)
+
+CURRENT_CONFIG_FILE = os.path.join(CONFIG_DIR, 'ytrc')
+_OLD_CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.yt', 'config')
+
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
 # without them.
 
-__fn = os.path.expanduser("~/.yt/config")
-if os.path.exists(__fn):
-    f = open(__fn).read()
+if os.path.exists(_OLD_CONFIG_FILE):
+    f = open(_OLD_CONFIG_FILE).read()
     if any(header in f for header in ["[lagos]","[raven]","[fido]","[enki]"]):
         print("***********************************************************")
         print("* Upgrading configuration file to new format; saving old. *")
         print("***********************************************************")
         # This is of the old format
         cp = configparser.ConfigParser()
-        cp.read(__fn)
+        cp.read(_OLD_CONFIG_FILE)
         # NOTE: To avoid having the 'DEFAULT' section here,
         # we are not passing in ytcfg_defaults to the constructor.
         new_cp = configparser.ConfigParser()
@@ -91,16 +101,21 @@
                 if option.lower() in ytcfg_defaults:
                     new_cp.set("yt", option, cp.get(section, option))
                     print("Setting %s to %s" % (option, cp.get(section, option)))
-        open(__fn + ".old", "w").write(f)
-        new_cp.write(open(__fn, "w"))
-# Pathological check for Kraken
-#elif os.path.exists("~/"):
-#    if not os.path.exists("~/.yt"):
-#            print "yt is creating a new directory, ~/.yt ."
-#            os.mkdir(os.path.exists("~/.yt/"))
-#    # Now we can read in and write out ...
-#    new_cp = configparser.ConfigParser(ytcfg_defaults)
-#    new_cp.write(__fn)
+        open(_OLD_CONFIG_FILE + ".old", "w").write(f)
+        new_cp.write(open(_OLD_CONFIG_FILE, "w"))
+
+    msg = (
+        "The configuration file {} is deprecated. "
+        "Please migrate your config to {} by running: "
+        "'yt config migrate'"
+    )
+    warnings.warn(msg.format(_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE))
+
+if not os.path.exists(CURRENT_CONFIG_FILE):
+    cp = configparser.ConfigParser()
+    cp.add_section("yt")
+    with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
+        cp.write(new_cfg)
 
 class YTConfigParser(configparser.ConfigParser):
     def __setitem__(self, key, val):
@@ -108,12 +123,8 @@
     def __getitem__(self, key):
         return self.get(key[0], key[1])
 
-if os.path.exists(os.path.expanduser("~/.yt/config")):
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg', os.path.expanduser('~/.yt/config')])
-else:
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg'])
+ytcfg = YTConfigParser(ytcfg_defaults)
+ytcfg.read([_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE, 'yt.cfg'])
 if not ytcfg.has_section("yt"):
     ytcfg.add_section("yt")
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -127,4 +127,3 @@
 
     return simulation_time_series_registry[simulation_type](parameter_filename,
                                                             find_outputs=find_outputs)
-

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -34,6 +34,7 @@
         ("cell_centered_B_x", (b_units, [], None)),
         ("cell_centered_B_y", (b_units, [], None)),
         ("cell_centered_B_z", (b_units, [], None)),
+        ("gravitational_potential", ("code_velocity**2", ["gravitational_potential"], None)),
     )
 
 # In Athena, conservative or primitive variables may be written out.

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -181,6 +181,15 @@
         ("magmom", ("g*cm/s", ["momentum_magnitude"], r"\rho |\mathbf{U}|")),
         ("maggrav", ("cm/s**2", [], r"|\mathbf{g}|")),
         ("phiGrav", ("erg/g", [], r"\Phi")),
+        ("enuc", ("erg/(g*s)", [], r"\dot{e}_{\rm{nuc}}")),
+        ("rho_enuc", ("erg/(cm**3*s)", [], r"\rho \dot{e}_{\rm{nuc}}")),
+        ("angular_momentum_x", ("g/(cm*s)", [], r"\ell_x")),
+        ("angular_momentum_y", ("g/(cm*s)", [], r"\ell_y")),
+        ("angular_momentum_z", ("g/(cm*s)", [], r"\ell_z")),
+        ("phiRot", ("erg/g", [], r"\Phi_{\rm{rot}}")),
+        ("rot_x", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_x")),
+        ("rot_y", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_y")),
+        ("rot_z", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_z")),
     )
 
     def setup_fluid_fields(self):

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/frontends/rockstar/definitions.py
--- a/yt/frontends/rockstar/definitions.py
+++ b/yt/frontends/rockstar/definitions.py
@@ -38,7 +38,7 @@
 # Note the final field here, which is a field for min/max format revision in
 # which the field appears.
 
-KNOWN_REVISIONS=[0, 1]
+KNOWN_REVISIONS=[0, 1, 2]
 
 halo_dt = [
     ('particle_identifier', np.int64),
@@ -101,6 +101,12 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
+    ('type', np.int32, (2, 100)),
+    ('sm', np.float32, (2, 100)),
+    ('gas', np.float32, (2, 100)),
+    ('bh', np.float32, (2, 100)),
+    ('peak_density', np.float32, (2, 100)),
+    ('av_density', np.float32, (2, 100)),
 ]
 
 halo_dts = {}

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -668,7 +668,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+        grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
 
     if length_unit is None:
         length_unit = 'code_length'

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,7 @@
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
+        ("metallicity", ("Zsun", ["metallicity"], None)),
 
         # We need to have a bunch of species fields here, too
         ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -16,7 +16,7 @@
 
 import errno
 from yt.extern.six import string_types
-from yt.extern.six.moves import input
+from yt.extern.six.moves import input, builtins
 import time
 import inspect
 import traceback
@@ -986,3 +986,21 @@
     except ImportError:
         pass
     return dummy_context_manager()
+
+interactivity = False
+
+"""Sets the condition that interactive backends can be used."""
+def toggle_interactivity():
+    global interactivity
+    interactivity = not interactivity
+    if interactivity is True:
+        if '__IPYTHON__' in dir(builtins):
+            import IPython
+            shell = IPython.get_ipython()
+            shell.magic('matplotlib')
+        else:
+            import matplotlib
+            matplotlib.interactive(True)
+
+def get_interactivity():
+    return interactivity

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -113,7 +113,7 @@
             # re-order the array and squeeze out the dummy dim
             return np.squeeze(np.transpose(img, (yax, xax, ax)))
 
-        elif dimension < 3:
+        elif self.axis_id.get(dimension, dimension) < 3:
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias, dimension, periodic)
         else:

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/geometry/coordinates/tests/test_axial_pixelization.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_axial_pixelization.py
@@ -0,0 +1,9 @@
+from yt.testing import \
+    fake_amr_ds, _geom_transforms
+from yt.utilities.answer_testing.framework import \
+    AxialPixelizationTest
+
+def test_axial_pixelization():
+    for geom in sorted(_geom_transforms):
+        ds = fake_amr_ds(geometry=geom)
+        yield AxialPixelizationTest(ds)

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -480,6 +480,9 @@
     test_unit = Unit('m_geom/l_geom**3')
     assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
 
+    test_unit = Unit('1e9*cm')
+    assert_equal(test_unit.latex_repr, '1.0 \\times 10^{9}\\ \\rm{cm}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -926,12 +926,12 @@
     yt_quan2 = YTQuantity.from_pint(p_quan)
 
     yield assert_array_equal, p_arr, yt_arr.to_pint()
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_array_equal, yt_arr, YTArray.from_pint(p_arr)
     yield assert_array_equal, yt_arr, yt_arr2
 
     yield assert_equal, p_quan.magnitude, yt_quan.to_pint().magnitude
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_equal, yt_quan, YTQuantity.from_pint(p_quan)
     yield assert_equal, yt_quan, yt_quan2
 

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -127,10 +127,20 @@
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):
             expr = expr.subs(symbols[i], symbols[0])
-
+    prefix = None
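+    # Peel off a leading float coefficient (e.g. the 1e9 in '1e9*cm') so it
+    # can be typeset separately below as a '1.0 \times 10^{9}' prefix.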
+    if isinstance(expr, Mul):
+        coeffs = expr.as_coeff_Mul()
+        if coeffs[0] == 1 or not isinstance(coeffs[0], Float):
+            pass
+        else:
+            expr = coeffs[1]
+            prefix = Float(coeffs[0], 2)
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
 
+    if prefix is not None:
+        latex_repr = latex(prefix, mul_symbol="times") + '\\ ' + latex_repr
+
     if latex_repr == '1':
         return ''
     else:

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -713,7 +713,7 @@
         >>> c = yt.YTArray.from_pint(b)
         """
         p_units = []
-        for base, exponent in arr.units.items():
+        for base, exponent in arr._units.items():
             bs = convert_pint_units(base)
             p_units.append("%s**(%s)" % (bs, Rational(exponent)))
         p_units = "*".join(p_units)

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -324,6 +324,8 @@
             self.ds = data_dir_load(ds_fn)
 
     def __call__(self):
+        if AnswerTestingTest.result_storage is None:
+            return
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
@@ -860,6 +862,47 @@
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
 
+class AxialPixelizationTest(AnswerTestingTest):
+    # This test is typically used once per geometry or coordinates type.
+    # Feed it a dataset, and it checks that the results of basic pixelization
+    # don't change.
+    _type_name = "AxialPixelization"
+    _attrs = ('geometry',)
+    def __init__(self, ds_fn, decimals=None):
+        super(AxialPixelizationTest, self).__init__(ds_fn)
+        self.decimals = decimals
+        self.geometry = self.ds.coordinates.name
+
+    def run(self):
+        rv = {}
+        ds = self.ds
+        for i, axis in enumerate(ds.coordinates.axis_order):
+            (bounds, center, display_center) = \
+                    pw.get_window_parameters(axis, ds.domain_center, None, ds)
+            slc = ds.slice(axis, center[i])
+            xax = ds.coordinates.axis_name[ds.coordinates.x_axis[axis]]
+            yax = ds.coordinates.axis_name[ds.coordinates.y_axis[axis]]
+            pix_x = ds.coordinates.pixelize(axis, slc, xax, bounds, (512, 512))
+            pix_y = ds.coordinates.pixelize(axis, slc, yax, bounds, (512, 512))
+            # Wipe out all NaNs
+            pix_x[np.isnan(pix_x)] = 0.0
+            pix_y[np.isnan(pix_y)] = 0.0
+            rv['%s_x' % axis] = pix_x
+            rv['%s_y' % axis] = pix_y
+        return rv
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_almost_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose_units(new_result[k], old_result[k],
+                                      10**(-self.decimals))
+
+
 def requires_sim(sim_fn, sim_type, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -26,7 +26,7 @@
 import json
 import pprint
 
-from yt.config import ytcfg
+from yt.config import ytcfg, CURRENT_CONFIG_FILE
 ytcfg["yt","__command_line"] = "True"
 from yt.startup_tasks import parser, subparsers
 from yt.funcs import \
@@ -39,11 +39,13 @@
     enable_plugins
 from yt.extern.six import add_metaclass, string_types
 from yt.extern.six.moves import urllib, input
+from yt.extern.six.moves.urllib.parse import urlparse
 from yt.convenience import load
 from yt.visualization.plot_window import \
     SlicePlot, \
     ProjectionPlot
 from yt.utilities.metadata import get_metadata
+from yt.utilities.configure import set_config
 from yt.utilities.exceptions import \
     YTOutputNotIdentified, YTFieldNotParseable
 
@@ -117,16 +119,48 @@
         print("Changeset = %s" % vstring.strip().decode("utf-8"))
     print("---")
     return vstring
+    
 
+def _get_girder_client():
+    try:
+        import girder_client
+    except ImportError:
+        print("this command requires girder_client to be installed")
+        print("Please install them using your python package manager, e.g.:")
+        print("   pip install girder_client --user")
+        exit()
+
+    hub_url = urlparse(ytcfg.get("yt", "hub_url"))
+    gc = girder_client.GirderClient(apiUrl=hub_url.geturl())
+    gc.authenticate(apiKey=ytcfg.get("yt", "hub_api_key"))
+    return gc
+
+
+_subparsers = {None: subparsers}
+_subparsers_description = {
+    'config': 'Get and set configuration values for yt',
+    'hub': 'Interact with the yt Hub'
+}
 class YTCommandSubtype(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
         if cls.name is not None:
             names = ensure_list(cls.name)
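+            # Lazily create one argparse sub-command group (e.g. "yt config",
+            # "yt hub") the first time a command declares that subparser.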
+            if cls.subparser not in _subparsers:
+                try:
+                    description = _subparsers_description[cls.subparser]
+                except KeyError:
+                    description = cls.subparser
+                parent_parser = argparse.ArgumentParser(add_help=False)
+                p = subparsers.add_parser(cls.subparser, help=description,
+                                          description=description,
+                                          parents=[parent_parser])
+                _subparsers[cls.subparser] = p.add_subparsers(
+                    title=cls.subparser, dest=cls.subparser)
+            sp = _subparsers[cls.subparser]
             for name in names:
-                sc = subparsers.add_parser(name,
-                    description = cls.description,
-                    help = cls.description)
+                sc = sp.add_parser(name, description=cls.description, 
+                                   help=cls.description)
                 sc.set_defaults(func=cls.run)
                 for arg in cls.args:
                     _add_arg(sc, arg)
@@ -138,6 +172,7 @@
     description = ""
     aliases = ()
     ndatasets = 1
+    subparser = None
 
     @classmethod
     def run(cls, args):
@@ -557,25 +592,27 @@
 
 
 class YTHubRegisterCmd(YTCommand):
-    name = "hub_register"
+    subparser = "hub"
+    name = "register"
     description = \
         """
-        Register a user on the Hub: http://hub.yt-project.org/
+        Register a user on the yt Hub: http://hub.yt/
         """
     def __call__(self, args):
-        # We need these pieces of information:
-        #   1. Name
-        #   2. Email
-        #   3. Username
-        #   4. Password (and password2)
-        #   5. (optional) URL
-        #   6. "Secret" key to make it epsilon harder for spammers
-        if ytcfg.get("yt","hub_api_key") != "":
+        try:
+            import requests
+        except ImportError:
+            print("yt {} requires requests to be installed".format(self.name))
+            print("Please install them using your python package manager, e.g.:")
+            print("   pip install requests --user")
+            exit()
+        if ytcfg.get("yt", "hub_api_key") != "":
             print("You seem to already have an API key for the hub in")
-            print("~/.yt/config .  Delete this if you want to force a")
+            print("{} . Delete this if you want to force a".format(CURRENT_CONFIG_FILE))
             print("new user registration.")
+            exit()
         print("Awesome!  Let's start by registering a new user for you.")
-        print("Here's the URL, for reference: http://hub.yt-project.org/ ")
+        print("Here's the URL, for reference: http://hub.yt/ ")
         print()
         print("As always, bail out with Ctrl-C at any time.")
         print()
@@ -586,8 +623,11 @@
         print()
         print("To start out, what's your name?")
         print()
-        name = input("Name? ")
-        if len(name) == 0: sys.exit(1)
+        first_name = input("First Name? ")
+        if len(first_name) == 0: sys.exit(1)
+        print()
+        last_name = input("Last Name? ")
+        if len(last_name) == 0: sys.exit(1)
         print()
         print("And your email address?")
         print()
@@ -604,33 +644,32 @@
             print("Sorry, they didn't match!  Let's try again.")
             print()
         print()
-        print("Would you like a URL displayed for your user?")
-        print("Leave blank if no.")
-        print()
-        url = input("URL? ")
-        print()
         print("Okay, press enter to register.  You should receive a welcome")
         print("message at %s when this is complete." % email)
         print()
         input()
-        data = dict(name = name, email = email, username = username,
-                    password = password1, password2 = password2,
-                    url = url, zap = "rowsdower")
-        data = urllib.parse.urlencode(data)
-        hub_url = "https://hub.yt-project.org/create_user"
-        req = urllib.request.Request(hub_url, data)
-        try:
-            urllib.request.urlopen(req).read()
-        except urllib.error.HTTPError as exc:
-            if exc.code == 400:
-                print("Sorry, the Hub couldn't create your user.")
-                print("You can't register duplicate users, which is the most")
-                print("common cause of this error.  All values for username,")
-                print("name, and email must be unique in our system.")
-                sys.exit(1)
-        except urllib.URLError as exc:
-            print("Something has gone wrong.  Here's the error message.")
-            raise exc
+
+        data = dict(firstName=first_name, email=email, login=username,
+                    password=password1, lastName=last_name, admin=False)
+        hub_url = ytcfg.get("yt", "hub_url")
+        req = requests.post(hub_url + "/user", data=data)
+      
+        if req.ok:
+            headers = {'Girder-Token': req.json()['authToken']['token']}
+        else:
+            if req.status_code == 400:
+                print("Registration failed with 'Bad request':")
+                print(req.json()["message"])
+            exit(1)
+        print("User registration successful")
+        print("Obtaining API key...")
+        req = requests.post(hub_url + "/api_key", headers=headers,
+                            data={'name': 'ytcmd', 'active': True})
+        apiKey = req.json()["key"]
+
+        print("Storing API key in configuration file")
+        set_config("yt", "hub_api_key", apiKey)
+        
         print()
         print("SUCCESS!")
         print()
@@ -810,40 +849,60 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTHubStartNotebook(YTCommand):
+    args = (
+        dict(dest="folderId", default=ytcfg.get("yt", "hub_sandbox"),
+             nargs="?", 
+             help="(Optional) Hub folder to mount inside the Notebook"),
+    )
+    description = \
+        """
+        Start the Jupyter Notebook on the yt Hub.
+        """
+    subparser = "hub"
+    name = "start"
+    def __call__(self, args):
+        gc = _get_girder_client()
+
+        # TODO: should happen server-side
+        _id = gc._checkResourcePath(args.folderId)
+
+        resp = gc.post("/notebook/{}".format(_id))
+        try:
+            print("Launched! Please visit this URL:")
+            print("    https://tmpnb.hub.yt" + resp['url'])
+            print()
+        except (KeyError, TypeError):
+            print("Something went wrong. The yt Hub responded with : ")
+            print(resp)
+
 class YTNotebookUploadCmd(YTCommand):
     args = (dict(short="file", type=str),)
     description = \
         """
-        Upload an IPython notebook to hub.yt-project.org.
+        Upload an IPython Notebook to the yt Hub.
         """
 
     name = "upload_notebook"
     def __call__(self, args):
-        filename = args.file
-        if not os.path.isfile(filename):
-            raise IOError(filename)
-        if not filename.endswith(".ipynb"):
-            print("File must be an IPython notebook!")
-            return 1
-        import json
-        try:
-            t = json.loads(open(filename).read())['metadata']['name']
-        except (ValueError, KeyError):
-            print("File does not appear to be an IPython notebook.")
-        if len(t) == 0:
-            t = filename.strip(".ipynb")
-        from yt.utilities.minimal_representation import MinimalNotebook
-        mn = MinimalNotebook(filename, t)
-        rv = mn.upload()
+        gc = _get_girder_client()
+        username = gc.get("/user/me")["login"]
+        gc.upload(args.file, "/user/{}/Public".format(username))
+
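+        # Resolve the uploaded item and its file id to build the links below.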
+        _id = gc.resourceLookup(
+            "/user/{}/Public/{}".format(username, args.file))["_id"]
+        _fid = next(gc.listFile(_id))["_id"]
+        hub_url = urlparse(ytcfg.get("yt", "hub_url"))
         print("Upload successful!")
         print()
         print("To access your raw notebook go here:")
         print()
-        print("  %s" % (rv['url']))
+        print("  {}://{}/#item/{}".format(hub_url.scheme, hub_url.netloc, _id))
         print()
         print("To view your notebook go here:")
         print()
-        print("  %s" % (rv['url'].replace("/go/", "/nb/")))
+        print("  http://nbviewer.jupyter.org/urls/{}/file/{}/download".format(
+            hub_url.netloc + hub_url.path, _fid))
         print()
 
 class YTPlotCmd(YTCommand):
@@ -947,7 +1006,7 @@
             )
     description = \
         """
-        Run the IPython Notebook
+        Start the Jupyter Notebook locally. 
         """
     def __call__(self, args):
         kwargs = {}
@@ -1141,6 +1200,61 @@
             print()
             pprint.pprint(rv)
 
+
+class YTConfigGetCmd(YTCommand):
+    subparser = 'config'
+    name = 'get'
+    description = 'get a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to retrieve.'))
+    def __call__(self, args):
+        from yt.utilities.configure import get_config
+        print(get_config(args.section, args.option))
+
+
+class YTConfigSetCmd(YTCommand):
+    subparser = 'config'
+    name = 'set'
+    description = 'set a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to set.'),
+            dict(short='value', help='The value to set the option to.'))
+    def __call__(self, args):
+        from yt.utilities.configure import set_config
+        set_config(args.section, args.option, args.value)
+
+
+class YTConfigRemoveCmd(YTCommand):
+    subparser = 'config'
+    name = 'rm'
+    description = 'remove a config option'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to remove.'))
+    def __call__(self, args):
+        from yt.utilities.configure import rm_config
+        rm_config(args.section, args.option)
+
+
+class YTConfigListCmd(YTCommand):
+    subparser = 'config'
+    name = 'list'
+    description = 'show the config content'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import write_config
+        write_config(sys.stdout)
+
+
+class YTConfigMigrateCmd(YTCommand):
+    subparser = 'config'
+    name = 'migrate'
+    description = 'migrate old config file'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import migrate_config
+        migrate_config()
+
+
 class YTSearchCmd(YTCommand):
     args = (dict(short="-o", longname="--output",
                  action="store", type=str,

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/utilities/configure.py
--- /dev/null
+++ b/yt/utilities/configure.py
@@ -0,0 +1,92 @@
+# -*- coding: UTF-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import argparse
+from yt.config import CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE
+from yt.extern.six.moves import configparser
+
+CONFIG = configparser.SafeConfigParser()
+CONFIG.read([CURRENT_CONFIG_FILE])
+
+
+def get_config(section, option):
+    return CONFIG.get(section, option)
+
+
+def set_config(section, option, value):
+    if not CONFIG.has_section(section):
+        CONFIG.add_section(section)
+    CONFIG.set(section, option, value)
+    write_config()
+
+
+def write_config(fd=None):
+    if fd is None:
+        with open(CURRENT_CONFIG_FILE, 'w') as fd:
+            CONFIG.write(fd)
+    else:
+        CONFIG.write(fd)
+
+def migrate_config():
+    if not os.path.exists(_OLD_CONFIG_FILE):
+        print("Old config not found.")
+        sys.exit()
+    CONFIG.read(_OLD_CONFIG_FILE)
+    print("Writing a new config file to: {}".format(CURRENT_CONFIG_FILE))
+    write_config()
+    print("Backing up the old config file: {}.bak".format(_OLD_CONFIG_FILE))
+    os.rename(_OLD_CONFIG_FILE, _OLD_CONFIG_FILE + '.bak')
+
+
+def rm_config(section, option):
+    CONFIG.remove_option(section, option)
+    write_config()
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Get and set configuration values for yt')
+    subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
+
+    get_parser = subparsers.add_parser('get', help='get a config value')
+    set_parser = subparsers.add_parser('set', help='set a config value')
+    rm_parser = subparsers.add_parser('rm', help='remove a config option')
+    subparsers.add_parser('migrate', help='migrate old config file')
+    subparsers.add_parser('list', help='show all config values')
+
+    get_parser.add_argument(
+        'section', help='The section containing the option.')
+    get_parser.add_argument('option', help='The option to retrieve.')
+
+    set_parser.add_argument(
+        'section', help='The section containing the option.')
+    set_parser.add_argument('option', help='The option to set.')
+    set_parser.add_argument('value', help='The value to set the option to.')
+
+    rm_parser.add_argument(
+        'section', help='The section containing the option to remove.')
+    rm_parser.add_argument('option', help='The option to remove.')
+
+    args = parser.parse_args()
+
+    if args.cmd == 'get':
+        print(get_config(args.section, args.option))
+    elif args.cmd == 'set':
+        set_config(args.section, args.option, args.value)
+    elif args.cmd == 'list':
+        write_config(sys.stdout)
+    elif args.cmd == 'migrate':
+        migrate_config()
+    elif args.cmd == 'rm':
+        rm_config(args.section, args.option)
+
+if __name__ == '__main__':
+    main()  # pragma: no cover

diff -r 44449bb0428362428fdb61f20a20eabe672529fe -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -110,6 +110,13 @@
             # We are somewhere in the middle of the face
             temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
             temp_y = ((temp_x - vc.left_edge[i])*vc.idds[i])
+            # There are some really tough cases where we are just within a
+            # couple of least-significant places of the edge; this helps
+            # prevent killing the calculation through a segfault in those cases.
+            if -1 < temp_y < 0 and step[i] > 0:
+                temp_y = 0.0
+            elif vc.dims[i] - 1 < temp_y < vc.dims[i] and step[i] < 0:
+                temp_y = vc.dims[i] - 1
             cur_ind[i] =  <int> (floor(temp_y))
         if step[i] > 0:
             temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b20e71795374/
Changeset:   b20e71795374
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 16:03:20+00:00
Summary:     Adding a clump finding test.
Affected #:  1 file

diff -r 6d9ef8030186c495b024d6bd6ba4b662f50f7ffc -r b20e7179537445818bd2df7503569b7fae76af5f yt/analysis_modules/level_sets/tests/test_clump_finding.py
--- a/yt/analysis_modules/level_sets/tests/test_clump_finding.py
+++ b/yt/analysis_modules/level_sets/tests/test_clump_finding.py
@@ -15,16 +15,25 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import os
+import shutil
+import tempfile
+
 
 from yt.analysis_modules.level_sets.api import \
     Clump, \
     find_clumps, \
     get_lowest_clumps
+from yt.convenience import \
+    load
 from yt.frontends.stream.api import \
     load_uniform_grid
 from yt.testing import \
     assert_array_equal, \
-    assert_equal
+    assert_equal, \
+    requires_file
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
 
 def test_clump_finding():
     n_c = 8
@@ -63,7 +72,6 @@
     # two leaf clumps
     assert_equal(len(leaf_clumps), 2)
 
-
     # check some clump fields
     assert_equal(master_clump.children[0]["density"][0].size, 1)
     assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
@@ -72,3 +80,58 @@
     assert_equal(master_clump.children[1]["density"][0].size, 1)
     assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
     assert_equal(master_clump.children[1]["particle_mass"].size, 0)
+
+i30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+ at requires_file(i30)
+def test_clump_tree_save():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(i30)
+    data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                          (8, 'kpc'), (1, 'kpc'))
+
+    field = ("gas", "density")
+    step = 2.0
+    c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+    c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
+
+    master_clump = Clump(data_source, field)
+    master_clump.add_info_item("center_of_mass")
+    master_clump.add_validator("min_cells", 20)
+
+    find_clumps(master_clump, c_min, c_max, step)
+    leaf_clumps = get_lowest_clumps(master_clump)
+
+    fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+    ds2 = load(fn)
+
+    # compare clumps in the tree
+    t1 = [c for c in master_clump]
+    t2 = [c for c in ds2.tree]
+    mt1 = ds.arr([c.info["cell_mass"][1] for c in t1])
+    mt2 = ds2.arr([c["clump", "cell_mass"] for c in t2])
+    it1 = np.argsort(mt1).d.astype(int)
+    it2 = np.argsort(mt2).d.astype(int)
+    assert_array_equal(mt1[it1], mt2[it2])
+
+    for i1, i2 in zip(it1, it2):
+        ct1 = t1[i1]
+        ct2 = t2[i2]
+        assert_array_equal(ct1["gas", "density"],
+                           ct2["grid", "density"])
+        assert_array_equal(ct1["all", "particle_mass"],
+                           ct2["all", "particle_mass"])
+
+    # compare leaf clumps
+    c1 = [c for c in leaf_clumps]
+    c2 = [c for c in ds2.leaves]
+    mc1 = ds.arr([c.info["cell_mass"][1] for c in c1])
+    mc2 = ds2.arr([c["clump", "cell_mass"] for c in c2])
+    ic1 = np.argsort(mc1).d.astype(int)
+    ic2 = np.argsort(mc2).d.astype(int)
+    assert_array_equal(mc1[ic1], mc2[ic2])
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)


https://bitbucket.org/yt_analysis/yt/commits/fca8adec0450/
Changeset:   fca8adec0450
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 16:04:27+00:00
Summary:     Typo.
Affected #:  1 file

diff -r b20e7179537445818bd2df7503569b7fae76af5f -r fca8adec04509c2c69ff3754041f2e343fcdac35 yt/utilities/tree_container.py
--- a/yt/utilities/tree_container.py
+++ b/yt/utilities/tree_container.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 class TreeContainer(object):
-    r"""A recursive data container for like merger trees and
+    r"""A recursive data container for things like merger trees and
     clump-finder trees.
 
     """


https://bitbucket.org/yt_analysis/yt/commits/9feb4d87348b/
Changeset:   9feb4d87348b
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 16:09:24+00:00
Summary:     Adding imports and reference docs.
Affected #:  2 files

diff -r fca8adec04509c2c69ff3754041f2e343fcdac35 -r 9feb4d87348bfcf14b1551aa7bebac297b16d0c8 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -399,6 +399,8 @@
    ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
    ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
    ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpTreeDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpContainer
    ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
    ~yt.frontends.ytdata.fields.YTGridFieldInfo
    ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5

diff -r fca8adec04509c2c69ff3754041f2e343fcdac35 -r 9feb4d87348bfcf14b1551aa7bebac297b16d0c8 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -23,7 +23,9 @@
     YTNonspatialDataset, \
     YTNonspatialHierarchy, \
     YTNonspatialGrid, \
-    YTProfileDataset
+    YTProfileDataset, \
+    YTClumpContainer, \
+    YTClumpTreeDataset
 
 from .io import \
     IOHandlerYTDataContainerHDF5, \


https://bitbucket.org/yt_analysis/yt/commits/622087339b81/
Changeset:   622087339b81
Branch:      yt
User:        brittonsmith
Date:        2016-08-16 13:11:17+00:00
Summary:     Updating clump finding docs.
Affected #:  2 files

diff -r 9feb4d87348bfcf14b1551aa7bebac297b16d0c8 -r 622087339b816e66a5c8bbadf83e09a948acd0d5 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -13,8 +13,14 @@
 the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
+Setting up the Clump Finder
+---------------------------
+
 The clump finder requires a data object (see :ref:`data-objects`) and a field
-over which the contouring is to be performed.
+over which the contouring is to be performed.  The data object is then used
+to create the initial
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object that
+acts as the base for clump finding.
 
 .. code:: python
 
@@ -28,11 +34,15 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
+Clump Validators
+----------------
+
 At this point, every isolated contour will be considered a clump,
 whether this is physical or not.  Validator functions can be added to
 determine if an individual contour should be considered a real clump.
-These functions are specified with the ``Clump.add_validator`` function.
-Current, two validators exist: a minimum number of cells and gravitational
+These functions are specified with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator`
+function.  Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -52,7 +62,8 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can
+The :func:`~yt.analysis_modules.level_sets.clump_validators.add_validator`
+function adds the validator to a registry that can
 be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
@@ -60,9 +71,15 @@
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum
-and maximum of the contouring field, and the step size.  The lower value of the
-contour finder will be continually multiplied by the step size.
+Running the Clump Finder
+------------------------
+
+Clump finding then proceeds by calling the
+:func:`~yt.analysis_modules.level_sets.clump_handling.find_clumps` function.
+This function accepts the
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object, the initial
+minimum and maximum of the contouring field, and the step size.  The lower value
+of the contour finder will be continually multiplied by the step size.
 
 .. code:: python
 
@@ -71,41 +88,27 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
-with its own ``children`` attribute, and so on.
+Calculating Clump Quantities
+----------------------------
 
-A number of helper routines exist for examining the clump hierarchy.
-
-.. code:: python
-
-   # Write a text file of the full hierarchy.
-   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-   # Write a text file of only the leaf nodes.
-   write_clumps(master_clump,0, "%s_clumps.txt" % ds)
-
-   # Get a list of just the leaf nodes.
-   leaf_clumps = get_lowest_clumps(master_clump)
-
-``Clump`` objects can be used like all other data containers.
-
-.. code:: python
-
-   print(leaf_clumps[0]["gas", "density"])
-   print(leaf_clumps[0].quantities.total_mass())
-
-The writing functions will write out a series or properties about each
-clump by default.  Additional properties can be appended with the
-``Clump.add_info_item`` function.
+By default, a number of quantities will be calculated for each clump when the
+clump finding process has finished.  The default quantities are: **total_cells**,
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
+**max_grid_level**, **min_number_density**, and **max_number_density**.
+Additional items can be added with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item`
+function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
 Just like the validators, custom info items can be added by defining functions
-that minimally accept a ``Clump`` object and return a string to be printed.
+that minimally accept a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object and return
+a format string and the value to be printed.  These are then added to the list
+of available info items by calling
+:func:`~yt.analysis_modules.level_sets.clump_info_items.add_clump_info`:
 
 .. code:: python
 
@@ -121,10 +124,47 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**,
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
-**max_grid_level**, **min_number_density**, **max_number_density**, and
-**distance_to_main_clump**.
+Besides the quantities calculated by default, the following are available:
+**center_of_mass** and **distance_to_main_clump**.
+
+Working with Clumps
+-------------------
+
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+contains a list of all sub-clumps.  Each sub-clump is also a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+with its own ``children`` attribute, and so on.
+
+.. code:: python
+
+   print(master_clump["gas", "density"])
+   print(master_clump.children)
+   print(master_clump.children[0]["gas", "density"])
+
+The entire clump tree can be traversed with a loop:
+
+.. code:: python
+
+   for clump in master_clump:
+       print(clump.clump_id)
+
+The :func:`~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps`
+function will return a list of the individual clumps that have no children
+of their own (the leaf clumps).
+
+.. code:: python
+
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
+
+   print(leaf_clumps[0]["gas", "density"])
+   print(leaf_clumps[0]["all", "particle_mass"])
+   print(leaf_clumps[0].quantities.total_mass())
+
+Visualizing Clumps
+------------------
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
@@ -134,3 +174,44 @@
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')
+
+Saving and Reloading Clump Data
+-------------------------------
+
+The clump tree can be saved as a reloadable dataset with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset`
+function.  This will save all info items that have been calculated as well as
+any field values specified with the *fields* keyword.  This function
+can be called for any clump in the tree, saving that clump and all those
+below it.
+
+.. code:: python
+
+   fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+
+The clump tree can then be reloaded as a regular dataset.  The ``tree`` attribute
+associated with the dataset provides access to the clump tree.  The tree can be
+iterated over in the same fashion as the original tree.
+
+.. code:: python
+
+   ds_clumps = yt.load(fn)
+   for clump in ds_clumps.tree:
+       print(clump.clump_id)
+
+The ``leaves`` attribute returns a list of all leaf clumps.
+
+.. code:: python
+
+   print(ds_clumps.leaves)
+
+Info items for each clump can be accessed with the ``clump`` field type.  Gas
+or grid fields should be accessed using the ``grid`` field type, and particle
+fields should be accessed using the specific particle type.
+
+.. code:: python
+
+   my_clump = ds_clumps.leaves[0]
+   print(my_clump["clump", "cell_mass"])
+   print(my_clump["grid", "density"])
+   print(my_clump["all", "particle_mass"])

diff -r 9feb4d87348bfcf14b1551aa7bebac297b16d0c8 -r 622087339b816e66a5c8bbadf83e09a948acd0d5 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -444,6 +444,26 @@
    ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
+.. _clump_finding:
+
+Clump Finding
+^^^^^^^^^^^^^
+
+The ``Clump`` object and associated functions can be used for identification
+of topologically disconnected structures, i.e., clump finding.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.analysis_modules.level_sets.clump_handling.Clump
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset
+   ~yt.analysis_modules.level_sets.clump_handling.find_clumps
+   ~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps
+   ~yt.analysis_modules.level_sets.clump_info_items.add_clump_info
+   ~yt.analysis_modules.level_sets.clump_validators.add_validator
+
 .. _halo_analysis_ref:
 
 Halo Analysis
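
For reference, the snippets in the updated documentation above chain together
roughly as follows.  This is a condensed sketch of the documented workflow,
assuming the IsolatedGalaxy sample dataset used elsewhere in this pull request:

    import numpy as np
    import yt
    from yt.analysis_modules.level_sets.api import \
        Clump, find_clumps, get_lowest_clumps

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                          (8, 'kpc'), (1, 'kpc'))

    # Set up the master clump with a validator and an extra info item.
    field = ("gas", "density")
    master_clump = Clump(data_source, field)
    master_clump.add_validator("min_cells", 20)
    master_clump.add_info_item("center_of_mass")

    # Contour between the field extrema, doubling the lower bound each step.
    c_min = 10**np.floor(np.log10(data_source[field]).min())
    c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
    find_clumps(master_clump, c_min, c_max, 2.0)

    # Inspect the leaves, then save and reload the whole tree.
    leaf_clumps = get_lowest_clumps(master_clump)
    print(leaf_clumps[0]["gas", "density"])

    fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
    ds_clumps = yt.load(fn)
    for clump in ds_clumps.tree:
        print(clump.clump_id)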


https://bitbucket.org/yt_analysis/yt/commits/c7e0e02fb9ff/
Changeset:   c7e0e02fb9ff
Branch:      yt
User:        brittonsmith
Date:        2016-08-17 15:34:51+00:00
Summary:     Doc format change.
Affected #:  1 file

diff -r 622087339b816e66a5c8bbadf83e09a948acd0d5 -r c7e0e02fb9ffb4073a22c632ecc44dd1f185dc95 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -92,9 +92,9 @@
 ----------------------------
 
 By default, a number of quantities will be calculated for each clump when the
-clump finding process has finished.  The default quantities are: **total_cells**,
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
-**max_grid_level**, **min_number_density**, and **max_number_density**.
+clump finding process has finished.  The default quantities are: ``total_cells``,
+``cell_mass``, ``mass_weighted_jeans_mass``, ``volume_weighted_jeans_mass``,
+``max_grid_level``, ``min_number_density``, and ``max_number_density``.
 Additional items can be added with the
 :func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item`
 function.
@@ -125,7 +125,7 @@
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
 Besides the quantities calculated by default, the following are available:
-**center_of_mass** and **distance_to_main_clump**.
+``center_of_mass`` and ``distance_to_main_clump``.
 
 Working with Clumps
 -------------------


https://bitbucket.org/yt_analysis/yt/commits/36670bc8b29f/
Changeset:   36670bc8b29f
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 09:14:01+00:00
Summary:     Add deprecation warning.
Affected #:  1 file

diff -r c7e0e02fb9ffb4073a22c632ecc44dd1f185dc95 -r 36670bc8b29fc260a7d22246f200e3677ffc09ef yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -145,6 +145,7 @@
         for child in self.children:
             child.clear_clump_info()
 
+    @deprecate("Clump.save_as_dataset")
     def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 


https://bitbucket.org/yt_analysis/yt/commits/55da3cb87913/
Changeset:   55da3cb87913
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 09:33:45+00:00
Summary:     Ensure type of contour_key.
Affected #:  1 file

diff -r 36670bc8b29fc260a7d22246f200e3677ffc09ef -r 55da3cb8791326cbcc24fa37b0b5617c86922d76 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -295,7 +295,7 @@
             contour_fields = \
               [("index", "contours_%s" % ckey)
                for ckey in np.unique(clump_info["contour_key"]) \
-               if ckey != "-1"]
+               if str(ckey) != "-1"]
 
             ptypes = []
             field_data = {}
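
The str() cast matters because the saved contour keys can come back either as
Python strings or as numpy integers (the -1 placeholder).  A small
illustration of why the normalized comparison covers both:

    import numpy as np

    # contour_key may be a string or a numpy integer; -1 marks "no contour".
    for ckey in ["-1", np.int64(-1), "8", np.int64(8)]:
        # str() gives both representations a common form to compare against.
        print(repr(ckey), "real contour:", str(ckey) != "-1")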


https://bitbucket.org/yt_analysis/yt/commits/ed78789acef4/
Changeset:   ed78789acef4
Branch:      yt
User:        MatthewTurk
Date:        2016-09-07 18:44:20+00:00
Summary:     Merged in brittonsmith/yt (pull request #2326)

Adding save_as_dataset for clump finder
Affected #:  13 files

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -13,8 +13,14 @@
 the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
+Setting up the Clump Finder
+---------------------------
+
 The clump finder requires a data object (see :ref:`data-objects`) and a field
-over which the contouring is to be performed.
+over which the contouring is to be performed.  The data object is then used
+to create the initial
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object that
+acts as the base for clump finding.
 
 .. code:: python
 
@@ -28,11 +34,15 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
+Clump Validators
+----------------
+
 At this point, every isolated contour will be considered a clump,
 whether this is physical or not.  Validator functions can be added to
 determine if an individual contour should be considered a real clump.
-These functions are specified with the ``Clump.add_validator`` function.
-Current, two validators exist: a minimum number of cells and gravitational
+These functions are specified with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator`
+function.  Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -52,7 +62,8 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can
+The :func:`~yt.analysis_modules.level_sets.clump_validators.add_validator`
+function adds the validator to a registry that can
 be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
@@ -60,9 +71,15 @@
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum
-and maximum of the contouring field, and the step size.  The lower value of the
-contour finder will be continually multiplied by the step size.
+Running the Clump Finder
+------------------------
+
+Clump finding then proceeds by calling the
+:func:`~yt.analysis_modules.level_sets.clump_handling.find_clumps` function.
+This function accepts the
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object, the initial
+minimum and maximum of the contouring field, and the step size.  The lower value
+of the contour finder will be continually multiplied by the step size.
 
 .. code:: python
 
@@ -71,41 +88,27 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
-with its own ``children`` attribute, and so on.
+Calculating Clump Quantities
+----------------------------
 
-A number of helper routines exist for examining the clump hierarchy.
-
-.. code:: python
-
-   # Write a text file of the full hierarchy.
-   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-   # Write a text file of only the leaf nodes.
-   write_clumps(master_clump,0, "%s_clumps.txt" % ds)
-
-   # Get a list of just the leaf nodes.
-   leaf_clumps = get_lowest_clumps(master_clump)
-
-``Clump`` objects can be used like all other data containers.
-
-.. code:: python
-
-   print(leaf_clumps[0]["gas", "density"])
-   print(leaf_clumps[0].quantities.total_mass())
-
-The writing functions will write out a series or properties about each
-clump by default.  Additional properties can be appended with the
-``Clump.add_info_item`` function.
+By default, a number of quantities will be calculated for each clump when the
+clump finding process has finished.  The default quantities are: ``total_cells``,
+``cell_mass``, ``mass_weighted_jeans_mass``, ``volume_weighted_jeans_mass``,
+``max_grid_level``, ``min_number_density``, and ``max_number_density``.
+Additional items can be added with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item`
+function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
 Just like the validators, custom info items can be added by defining functions
-that minimally accept a ``Clump`` object and return a string to be printed.
+that minimally accept a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object and return
+a format string and the value to be printed.  These are then added to the list
+of available info items by calling
+:func:`~yt.analysis_modules.level_sets.clump_info_items.add_clump_info`:
 
 .. code:: python
 
@@ -121,10 +124,47 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**,
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
-**max_grid_level**, **min_number_density**, **max_number_density**, and
-**distance_to_main_clump**.
+Besides the quantities calculated by default, the following are available:
+``center_of_mass`` and ``distance_to_main_clump``.
+
+Working with Clumps
+-------------------
+
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+contains a list of all sub-clumps.  Each sub-clump is also a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+with its own ``children`` attribute, and so on.
+
+.. code:: python
+
+   print(master_clump["gas", "density"])
+   print(master_clump.children)
+   print(master_clump.children[0]["gas", "density"])
+
+The entire clump tree can be traversed with a loop:
+
+.. code:: python
+
+   for clump in master_clump:
+       print(clump.clump_id)
+
+The :func:`~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps`
+function will return a list of the individual clumps that have no children
+of their own (the leaf clumps).
+
+.. code:: python
+
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
+
+   print(leaf_clumps[0]["gas", "density"])
+   print(leaf_clumps[0]["all", "particle_mass"])
+   print(leaf_clumps[0].quantities.total_mass())
+
+Visualizing Clumps
+------------------
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
@@ -134,3 +174,44 @@
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')
+
+Saving and Reloading Clump Data
+-------------------------------
+
+The clump tree can be saved as a reloadable dataset with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset`
+function.  This will save all info items that have been calculated as well as
+any field values specified with the *fields* keyword.  This function
+can be called for any clump in the tree, saving that clump and all those
+below it.
+
+.. code:: python
+
+   fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+
+The clump tree can then be reloaded as a regular dataset.  The ``tree`` attribute
+associated with the dataset provides access to the clump tree.  The tree can be
+iterated over in the same fashion as the original tree.
+
+.. code:: python
+
+   ds_clumps = yt.load(fn)
+   for clump in ds_clumps.tree:
+       print(clump.clump_id)
+
+The ``leaves`` attribute returns a list of all leaf clumps.
+
+.. code:: python
+
+   print(ds_clumps.leaves)
+
+Info items for each clump can be accessed with the ``clump`` field type.  Gas
+or grid fields should be accessed using the ``grid`` field type, and particle
+fields should be accessed using the specific particle type.
+
+.. code:: python
+
+   my_clump = ds_clumps.leaves[0]
+   print(my_clump["clump", "cell_mass"])
+   print(my_clump["grid", "density"])
+   print(my_clump["all", "particle_mass"])

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -27,14 +27,14 @@
 # As many validators can be added as you want.
 master_clump.add_validator("min_cells", 20)
 
+# Calculate center of mass for all clumps.
+master_clump.add_info_item("center_of_mass")
+
 # Begin clump finding.
 find_clumps(master_clump, c_min, c_max, step)
 
-# Write out the full clump hierarchy.
-write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-# Write out only the leaf nodes of the hierarchy.
-write_clumps(master_clump,0, "%s_clumps.txt" % ds)
+# Save the clump tree as a reloadable dataset
+fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
 
 # We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
@@ -46,5 +46,17 @@
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
-# Lastly, we write the plot to disk.
+# Save the plot to disk.
 prj.save('clumps')
+
+# Reload the clump dataset.
+cds = yt.load(fn)
+
+# Query fields for clumps in the tree.
+print (cds.tree["clump", "center_of_mass"])
+print (cds.tree.children[0]["grid", "density"])
+print (cds.tree.children[1]["all", "particle_mass"])
+
+# Get all of the leaf clumps.
+print (cds.leaves)
+print (cds.leaves[0]["clump", "cell_mass"])

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -399,6 +399,8 @@
    ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
    ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
    ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpTreeDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpContainer
    ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
    ~yt.frontends.ytdata.fields.YTGridFieldInfo
    ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
@@ -442,6 +444,26 @@
    ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
+.. _clump_finding:
+
+Clump Finding
+^^^^^^^^^^^^^
+
+The ``Clump`` object and associated functions can be used for identification
+of topologically disconnected structures, i.e., clump finding.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.analysis_modules.level_sets.clump_handling.Clump
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset
+   ~yt.analysis_modules.level_sets.clump_handling.find_clumps
+   ~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps
+   ~yt.analysis_modules.level_sets.clump_info_items.add_clump_info
+   ~yt.analysis_modules.level_sets.clump_validators.add_validator
+
 .. _halo_analysis_ref:
 
 Halo Analysis

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,14 +13,22 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import copy
 import numpy as np
 import uuid
 
 from yt.fields.derived_field import \
     ValidateSpatial
-from yt.funcs import mylog, iterable
-from yt.extern.six import string_types
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
+from yt.funcs import \
+    deprecate, \
+    get_output_filename, \
+    iterable, \
+    mylog
+from yt.extern.six import \
+    string_types
+from yt.utilities.tree_container import \
+    TreeContainer
 
 from .clump_info_items import \
     clump_info_registry
@@ -46,28 +54,40 @@
                  display_field=False,
                  units='')
 
-class Clump(object):
+class Clump(TreeContainer):
     children = None
     def __init__(self, data, field, parent=None,
-                 clump_info=None, validators=None):
+                 clump_info=None, validators=None,
+                 base=None, contour_key=None,
+                 contour_id=None):
         self.data = data
         self.field = field
         self.parent = parent
         self.quantities = data.quantities
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
+        self.info = {}
+
+        # is this the parent clump?
+        if base is None:
+            base = self
+            self.total_clumps = 0
+            if clump_info is None:
+                self.set_default_clump_info()
+            else:
+                self.clump_info = clump_info
+
+        self.base = base
+        self.clump_id = self.base.total_clumps
+        self.base.total_clumps += 1
+        self.contour_key = contour_key
+        self.contour_id = contour_id
 
         if parent is not None:
             self.data.parent = self.parent.data
 
-        # List containing characteristics about clumps that are to be written 
-        # out by the write routines.
-        if clump_info is None:
-            self.set_default_clump_info()
-        else:
-            # Clump info will act the same if add_info_item is called 
-            # before or after clump finding.
-            self.clump_info = copy.deepcopy(clump_info)
+        if parent is not None:
+            self.data.parent = self.parent.data
 
         if validators is None:
             validators = []
@@ -125,10 +145,11 @@
         for child in self.children:
             child.clear_clump_info()
 
+    @deprecate("Clump.save_as_dataset")
     def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
-        for item in self.clump_info:
+        for item in self.base.clump_info:
             value = item(self)
             f_ptr.write("%s%s\n" % ('\t'*level, value))
 
@@ -159,8 +180,190 @@
                 # Using "ones" here will speed things up.
                 continue
             self.children.append(Clump(new_clump, self.field, parent=self,
-                                       clump_info=self.clump_info,
-                                       validators=self.validators))
+                                       validators=self.validators,
+                                       base=self.base,
+                                       contour_key=contour_key,
+                                       contour_id=cid))
+
+    def __iter__(self):
+        yield self
+        if self.children is None:
+            return
+        for child in self.children:
+            for a_node in child:
+                yield a_node
+
+    def save_as_dataset(self, filename=None, fields=None):
+        r"""Export clump tree to a reloadable yt dataset.
+
+        This function will take a clump object and output a dataset
+        containing the fields given in the ``fields`` list and all info
+        items.  The resulting dataset can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset and the clump
+            index.
+        fields : list of strings or tuples, optional
+            If this is supplied, it is the list of fields to be saved to
+            disk.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import numpy as np
+        >>> import yt
+        >>> from yt.analysis_modules.level_sets.api import \
+        ...         Clump, find_clumps
+        >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+        ...                       (8, 'kpc'), (1, 'kpc'))
+        >>> field = ("gas", "density")
+        >>> step = 2.0
+        >>> c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+        >>> c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
+        >>> master_clump = Clump(data_source, field)
+        >>> master_clump.add_info_item("center_of_mass")
+        >>> master_clump.add_validator("min_cells", 20)
+        >>> find_clumps(master_clump, c_min, c_max, step)
+        >>> fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+        >>> new_ds = yt.load(fn)
+        >>> print (new_ds.tree["clump", "cell_mass"])
+        1296926163.91 Msun
+        >>> print (new_ds.tree["grid", "density"])
+        [  2.54398434e-26   2.46620353e-26   2.25120154e-26 ...,   1.12879234e-25
+           1.59561490e-25   1.09824903e-24] g/cm**3
+        >>> print (new_ds.tree["all", "particle_mass"])
+        [  4.25472446e+38   4.25472446e+38   4.25472446e+38 ...,   2.04238266e+38
+           2.04523901e+38   2.04770938e+38] g
+        >>> print (new_ds.tree.children[0]["clump", "cell_mass"])
+        909636495.312 Msun
+        >>> print (new_ds.leaves[0]["clump", "cell_mass"])
+        3756566.99809 Msun
+        >>> print (new_ds.leaves[0]["grid", "density"])
+        [  6.97820274e-24   6.58117370e-24   7.32046082e-24   6.76202430e-24
+           7.41184837e-24   6.76981480e-24   6.94287213e-24   6.56149658e-24
+           6.76584569e-24   6.94073710e-24   7.06713082e-24   7.22556526e-24
+           7.08338898e-24   6.78684331e-24   7.40647040e-24   7.03050456e-24
+           7.12438678e-24   6.56310217e-24   7.23201662e-24   7.17314333e-24] g/cm**3
+
+        """
+
+        ds = self.data.ds
+        keyword = "%s_clump_%d" % (str(ds), self.clump_id)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        # collect clump info fields
+        clump_info = dict([(ci.name, []) for ci in self.base.clump_info])
+        clump_info.update(
+            dict([(field, []) for field in ["clump_id", "parent_id",
+                                            "contour_key", "contour_id"]]))
+        for clump in self:
+            clump_info["clump_id"].append(clump.clump_id)
+            if clump.parent is None:
+                parent_id = -1
+            else:
+                parent_id = clump.parent.clump_id
+            clump_info["parent_id"].append(parent_id)
+
+            contour_key = clump.contour_key
+            if contour_key is None: contour_key = -1
+            clump_info["contour_key"].append(contour_key)
+            contour_id = clump.contour_id
+            if contour_id is None: contour_id = -1
+            clump_info["contour_id"].append(contour_id)
+
+            for ci in self.base.clump_info:
+                ci(clump)
+                clump_info[ci.name].append(clump.info[ci.name][1])
+        for ci in clump_info:
+            if hasattr(clump_info[ci][0], "units"):
+                clump_info[ci] = ds.arr(clump_info[ci])
+            else:
+                clump_info[ci] = np.array(clump_info[ci])
+
+        ftypes = dict([(ci, "clump") for ci in clump_info])
+
+        # collect data fields
+        if fields is not None:
+            contour_fields = \
+              [("index", "contours_%s" % ckey)
+               for ckey in np.unique(clump_info["contour_key"]) \
+               if str(ckey) != "-1"]
+
+            ptypes = []
+            field_data = {}
+            need_grid_positions = False
+            for f in self.base.data._determine_fields(fields) + contour_fields:
+                field_data[f] = self.base[f]
+                if ds.field_info[f].particle_type:
+                    if f[0] not in ptypes:
+                        ptypes.append(f[0])
+                    ftypes[f] = f[0]
+                else:
+                    need_grid_positions = True
+                    ftypes[f] = "grid"
+
+            if len(ptypes) > 0:
+                for ax in "xyz":
+                    for ptype in ptypes:
+                        p_field = (ptype, "particle_position_%s" % ax)
+                        if p_field in ds.field_info and \
+                          p_field not in field_data:
+                            ftypes[p_field] = p_field[0]
+                            field_data[p_field] = self.base[p_field]
+
+                for clump in self:
+                    if clump.contour_key is None:
+                        continue
+                    for ptype in ptypes:
+                        cfield = (ptype, "contours_%s" % clump.contour_key)
+                        if cfield not in field_data:
+                            field_data[cfield] = \
+                              clump.data._part_ind(ptype).astype(np.int64)
+                            ftypes[cfield] = ptype
+                        field_data[cfield][clump.data._part_ind(ptype)] = \
+                          clump.contour_id
+
+            if need_grid_positions:
+                for ax in "xyz":
+                    g_field = ("index", ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        field_data[g_field] = self.base[g_field]
+                        ftypes[g_field] = "grid"
+                    g_field = ("index", "d" + ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        ftypes[g_field] = "grid"
+                        field_data[g_field] = self.base[g_field]
+
+            if self.contour_key is not None:
+                cfilters = {}
+                for field in field_data:
+                    if ftypes[field] == "grid":
+                        ftype = "index"
+                    else:
+                        ftype = field[0]
+                    cfield = (ftype, "contours_%s" % self.contour_key)
+                    if cfield not in cfilters:
+                        cfilters[cfield] = field_data[cfield] == self.contour_id
+                    field_data[field] = field_data[field][cfilters[cfield]]
+
+        clump_info.update(field_data)
+        extra_attrs = {"data_type": "yt_clump_tree",
+                       "container_type": "yt_clump_tree"}
+        save_as_dataset(ds, filename, clump_info,
+                        field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
 
     def pass_down(self,operation):
         """
@@ -270,6 +473,7 @@
 
     return clump_list
 
+@deprecate("Clump.save_as_dataset")
 def write_clump_index(clump, level, fh):
     top = False
     if isinstance(fh, string_types):
@@ -287,6 +491,7 @@
     if top:
         fh.close()
 
+@deprecate("Clump.save_as_dataset")
 def write_clumps(clump, level, fh):
     top = False
     if isinstance(fh, string_types):

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -21,14 +21,15 @@
 clump_info_registry = OperatorRegistry()
 
 def add_clump_info(name, function):
-    clump_info_registry[name] = ClumpInfoCallback(function)
+    clump_info_registry[name] = ClumpInfoCallback(name, function)
 
 class ClumpInfoCallback(object):
     r"""
     A ClumpInfoCallback is a function that takes a clump, computes a 
     quantity, and returns a string to be printed out for writing clump info.
     """
-    def __init__(self, function, args=None, kwargs=None):
+    def __init__(self, name, function, args=None, kwargs=None):
+        self.name = name
         self.function = function
         self.args = args
         if self.args is None: self.args = []
@@ -36,43 +37,51 @@
         if self.kwargs is None: self.kwargs = {}
 
     def __call__(self, clump):
-        return self.function(clump, *self.args, **self.kwargs)
-    
+        if self.name not in clump.info:
+            clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
+        rv = clump.info[self.name]
+        return rv[0] % rv[1]
+
+def _center_of_mass(clump, units="code_length", **kwargs):
+    p = clump.quantities.center_of_mass(**kwargs)
+    return "Center of mass: %s.", p.to(units)
+add_clump_info("center_of_mass", _center_of_mass)
+
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
-    return "Cells: %d." % n_cells
+    return "Cells: %d.", n_cells
 add_clump_info("total_cells", _total_cells)
 
 def _cell_mass(clump):
     cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
-    return "Mass: %e Msun." % cell_mass
+    return "Mass: %e Msun.", cell_mass
 add_clump_info("cell_mass", _cell_mass)
 
 def _mass_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
-    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (mass-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
 
 def _volume_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("index", "cell_volume")).in_units("Msun")
-    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (volume-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
 
 def _max_grid_level(clump):
     max_level = clump.data["index", "grid_level"].max()
-    return "Max grid level: %d." % max_level
+    return "Max grid level: %d.", max_level
 add_clump_info("max_grid_level", _max_grid_level)
 
 def _min_number_density(clump):
     min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
-    return "Min number density: %.6e cm^-3." % min_n
+    return "Min number density: %.6e cm^-3.", min_n
 add_clump_info("min_number_density", _min_number_density)
 
 def _max_number_density(clump):
     max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
-    return "Max number density: %.6e cm^-3." % max_n
+    return "Max number density: %.6e cm^-3.", max_n
 add_clump_info("max_number_density", _max_number_density)
 
 def _distance_to_main_clump(clump, units="pc"):
@@ -82,6 +91,7 @@
     master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
     my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e %s." % \
-      (distance.in_units(units), units)
+    distance.convert_to_units("pc")
+    return "Distance from master center of mass: %%.6e %s." % units, \
+      distance.in_units(units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,6 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.utilities.lib.misc_utilities import \
     gravitational_binding_energy
 from yt.utilities.operator_registry import \
@@ -64,28 +66,30 @@
              (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
              (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
 
+    if use_particles:
+        m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
+                            clump["all", "particle_mass"].in_cgs()])
+        px = np.concatenate([clump["index", "x"].in_cgs(),
+                             clump["all", "particle_position_x"].in_cgs()])
+        py = np.concatenate([clump["index", "y"].in_cgs(),
+                             clump["all", "particle_position_y"].in_cgs()])
+        pz = np.concatenate([clump["index", "z"].in_cgs(),
+                             clump["all", "particle_position_z"].in_cgs()])
+    else:
+        m = clump["gas", "cell_mass"].in_cgs()
+        px = clump["index", "x"].in_cgs()
+        py = clump["index", "y"].in_cgs()
+        pz = clump["index", "z"].in_cgs()
+
     potential = clump.data.ds.quan(G *
         gravitational_binding_energy(
-            clump["gas", "cell_mass"].in_cgs(),
-            clump["index", "x"].in_cgs(),
-            clump["index", "y"].in_cgs(),
-            clump["index", "z"].in_cgs(),
+            m, px, py, pz,
             truncate, (kinetic / G).in_cgs()),
-        kinetic.in_cgs().units)
-    
+            kinetic.in_cgs().units)
+
     if truncate and potential >= kinetic:
         return True
 
-    if use_particles:
-        potential += clump.data.ds.quan(G *
-            gravitational_binding_energy(
-                clump["all", "particle_mass"].in_cgs(),
-                clump["all", "particle_position_x"].in_cgs(),
-                clump["all", "particle_position_y"].in_cgs(),
-                clump["all", "particle_position_z"].in_cgs(),
-                truncate, ((kinetic - potential) / G).in_cgs()),
-        kinetic.in_cgs().units)
-
     return potential >= kinetic
 add_validator("gravitationally_bound", _gravitationally_bound)
 

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/analysis_modules/level_sets/tests/test_clump_finding.py
--- a/yt/analysis_modules/level_sets/tests/test_clump_finding.py
+++ b/yt/analysis_modules/level_sets/tests/test_clump_finding.py
@@ -15,16 +15,25 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import os
+import shutil
+import tempfile
+
 
 from yt.analysis_modules.level_sets.api import \
     Clump, \
     find_clumps, \
     get_lowest_clumps
+from yt.convenience import \
+    load
 from yt.frontends.stream.api import \
     load_uniform_grid
 from yt.testing import \
     assert_array_equal, \
-    assert_equal
+    assert_equal, \
+    requires_file
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
 
 def test_clump_finding():
     n_c = 8
@@ -63,7 +72,6 @@
     # two leaf clumps
     assert_equal(len(leaf_clumps), 2)
 
-
     # check some clump fields
     assert_equal(master_clump.children[0]["density"][0].size, 1)
     assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
@@ -72,3 +80,58 @@
     assert_equal(master_clump.children[1]["density"][0].size, 1)
     assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
     assert_equal(master_clump.children[1]["particle_mass"].size, 0)
+
+i30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+@requires_file(i30)
+def test_clump_tree_save():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(i30)
+    data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                          (8, 'kpc'), (1, 'kpc'))
+
+    field = ("gas", "density")
+    step = 2.0
+    c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+    c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
+
+    master_clump = Clump(data_source, field)
+    master_clump.add_info_item("center_of_mass")
+    master_clump.add_validator("min_cells", 20)
+
+    find_clumps(master_clump, c_min, c_max, step)
+    leaf_clumps = get_lowest_clumps(master_clump)
+
+    fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+    ds2 = load(fn)
+
+    # compare clumps in the tree
+    t1 = [c for c in master_clump]
+    t2 = [c for c in ds2.tree]
+    mt1 = ds.arr([c.info["cell_mass"][1] for c in t1])
+    mt2 = ds2.arr([c["clump", "cell_mass"] for c in t2])
+    it1 = np.argsort(mt1).d.astype(int)
+    it2 = np.argsort(mt2).d.astype(int)
+    assert_array_equal(mt1[it1], mt2[it2])
+
+    for i1, i2 in zip(it1, it2):
+        ct1 = t1[i1]
+        ct2 = t2[i2]
+        assert_array_equal(ct1["gas", "density"],
+                           ct2["grid", "density"])
+        assert_array_equal(ct1["all", "particle_mass"],
+                           ct2["all", "particle_mass"])
+
+    # compare leaf clumps
+    c1 = [c for c in leaf_clumps]
+    c2 = [c for c in ds2.leaves]
+    mc1 = ds.arr([c.info["cell_mass"][1] for c in c1])
+    mc2 = ds2.arr([c["clump", "cell_mass"] for c in c2])
+    ic1 = np.argsort(mc1).d.astype(int)
+    ic2 = np.argsort(mc2).d.astype(int)
+    assert_array_equal(mc1[ic1], mc2[ic2])
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -783,6 +783,7 @@
         self.conditionals = ensure_list(conditionals)
         self.base_object = data_source
         self._selector = None
+        self._particle_mask = {}
         # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
         # ires and get_data
 
@@ -805,7 +806,8 @@
             f = self.base_object[field]
             if f.shape != ind.shape:
                 parent = getattr(self, "parent", self.base_object)
-                self.field_data[field] = parent[field][self._part_ind]
+                self.field_data[field] = \
+                  parent[field][self._part_ind(field[0])]
             else:
                 self.field_data[field] = self.base_object[field][ind]
 
@@ -835,21 +837,22 @@
                 np.logical_and(res, ind, ind)
         return ind
 
-    _particle_mask = None
-    @property
-    def _part_ind(self):
-        if self._particle_mask is None:
+    def _part_ind(self, ptype):
+        if self._particle_mask.get(ptype) is None:
             parent = getattr(self, "parent", self.base_object)
             units = "code_length"
             mask = points_in_cells(
-                self["x"].to(units), self["y"].to(units),
-                self["z"].to(units), self["dx"].to(units),
-                self["dy"].to(units), self["dz"].to(units),
-                parent["particle_position_x"].to(units),
-                parent["particle_position_y"].to(units),
-                parent["particle_position_z"].to(units))
-            self._particle_mask = mask
-        return self._particle_mask
+                self[("index", "x")].to(units),
+                self[("index", "y")].to(units),
+                self[("index", "z")].to(units),
+                self[("index", "dx")].to(units),
+                self[("index", "dy")].to(units),
+                self[("index", "dz")].to(units),
+                parent[(ptype, "particle_position_x")].to(units),
+                parent[(ptype, "particle_position_y")].to(units),
+                parent[(ptype, "particle_position_z")].to(units))
+            self._particle_mask[ptype] = mask
+        return self._particle_mask[ptype]
 
     @property
     def icoords(self):
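
The switch from a single cached mask to a dictionary keyed by particle type
is a standard lazy cache.  In isolation (hypothetical names; the real
computation is the points_in_cells call above):

    class MaskCache(object):
        def __init__(self):
            self._particle_mask = {}   # one cached mask per particle type

        def _compute_mask(self, ptype):
            print("computing mask for %s" % ptype)
            return [True, False, True]   # stand-in for points_in_cells()

        def part_ind(self, ptype):
            # Compute each particle type's mask at most once.
            if self._particle_mask.get(ptype) is None:
                self._particle_mask[ptype] = self._compute_mask(ptype)
            return self._particle_mask[ptype]

    c = MaskCache()
    c.part_ind("io")   # computes and caches
    c.part_ind("io")   # served from the cache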

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -23,7 +23,9 @@
     YTNonspatialDataset, \
     YTNonspatialHierarchy, \
     YTNonspatialGrid, \
-    YTProfileDataset
+    YTProfileDataset, \
+    YTClumpContainer, \
+    YTClumpTreeDataset
 
 from .io import \
     IOHandlerYTDataContainerHDF5, \

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -56,6 +56,8 @@
     _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.utilities.tree_container import \
+    TreeContainer
 from yt.fields.field_exceptions import \
     NeedsGridType
 from yt.data_objects.data_containers import \
@@ -480,20 +482,37 @@
                 particles.append((ftype, fname))
             elif (ftype, fname) not in fluids:
                 fluids.append((ftype, fname))
+
         # The _read method will figure out which fields it needs to get from
         # disk, and return a dict of those fields along with the fields that
         # need to be generated.
         read_fluids, gen_fluids = self.index._read_fluid_fields(
                                         fluids, self, self._current_chunk)
         for f, v in read_fluids.items():
-            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
-            self.field_data[f].convert_to_units(finfos[f].output_units)
+            convert = True
+            if v.dtype != np.float64:
+                if finfos[f].units == "":
+                    self.field_data[f] = v
+                    convert = False
+                else:
+                    v = v.astype(np.float64)
+            if convert:
+                self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+                self.field_data[f].convert_to_units(finfos[f].output_units)
 
-        read_particles, gen_particles = self.index._read_particle_fields(
+        read_particles, gen_particles = self.index._read_fluid_fields(
                                         particles, self, self._current_chunk)
         for f, v in read_particles.items():
-            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
-            self.field_data[f].convert_to_units(finfos[f].output_units)
+            convert = True
+            if v.dtype != np.float64:
+                if finfos[f].units == "":
+                    self.field_data[f] = v
+                    convert = False
+                else:
+                    v = v.astype(np.float64)
+            if convert:
+                self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+                self.field_data[f].convert_to_units(finfos[f].output_units)
 
         fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
@@ -562,7 +581,7 @@
 
     def _setup_classes(self):
         # We don't allow geometric selection for non-spatial datasets
-        pass
+        self.objects = []
 
     @parallel_root_only
     def print_key_parameters(self):
@@ -685,3 +704,78 @@
             if data_type == "yt_profile":
                 return True
         return False
+
+class YTClumpContainer(TreeContainer):
+    def __init__(self, clump_id, global_id, parent_id,
+                 contour_key, contour_id, ds=None):
+        self.clump_id = clump_id
+        self.global_id = global_id
+        self.parent_id = parent_id
+        self.contour_key = contour_key
+        self.contour_id = contour_id
+        self.parent = None
+        self.ds = ds
+        TreeContainer.__init__(self)
+
+    def add_child(self, child):
+        if self.children is None:
+            self.children = []
+        self.children.append(child)
+        child.parent = self
+
+    def __repr__(self):
+        return "Clump[%d]" % self.clump_id
+
+    def __getitem__(self, field):
+        g = self.ds.data
+        f = g._determine_fields(field)[0]
+        if f[0] == "clump":
+            return g[f][self.global_id]
+        if self.contour_id == -1:
+            return g[f]
+        cfield = (f[0], "contours_%s" % self.contour_key.decode('utf-8'))
+        if f[0] == "grid":
+            return g[f][g[cfield] == self.contour_id]
+        return self.parent[f][g[cfield] == self.contour_id]
+
+class YTClumpTreeDataset(YTNonspatialDataset):
+    """Dataset for saved clump-finder data."""
+    def __init__(self, filename, unit_system="cgs"):
+        super(YTClumpTreeDataset, self).__init__(filename,
+                                                 unit_system=unit_system)
+        self._load_tree()
+
+    def _load_tree(self):
+        my_tree = {}
+        for i, clump_id in enumerate(self.data[("clump", "clump_id")]):
+            my_tree[clump_id] = YTClumpContainer(
+                clump_id, i, self.data["clump", "parent_id"][i],
+                self.data["clump", "contour_key"][i],
+                self.data["clump", "contour_id"][i], self)
+        for clump in my_tree.values():
+            if clump.parent_id == -1:
+                self.tree = clump
+            else:
+                parent = my_tree[clump.parent_id]
+                parent.add_child(clump)
+
+    _leaves = None
+    @property
+    def leaves(self):
+        if self._leaves is None:
+            self._leaves = []
+            for clump in self.tree:
+                if clump.children is None:
+                    self._leaves.append(clump)
+        return self._leaves
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = parse_h5_attr(f, "data_type")
+            if data_type is None:
+                return False
+            if data_type == "yt_clump_tree":
+                return True
+        return False
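
_load_tree reconstructs the hierarchy from two flat arrays, clump_id and
parent_id, where -1 marks the root.  A standalone sketch of that idea (Node
is a hypothetical stand-in for YTClumpContainer):

    clump_ids  = [0, 1, 2, 3, 4]
    parent_ids = [-1, 0, 0, 1, 1]   # clump 0 is the master clump

    class Node(object):
        def __init__(self, nid):
            self.nid = nid
            self.children = None

        def add_child(self, child):
            if self.children is None:
                self.children = []
            self.children.append(child)

    nodes = dict((cid, Node(cid)) for cid in clump_ids)
    tree = None
    for cid, pid in zip(clump_ids, parent_ids):
        if pid == -1:
            tree = nodes[cid]                 # the root of the tree
        else:
            nodes[pid].add_child(nodes[cid])  # link child to its parent

    # Leaves are the nodes that never received children.
    leaves = [n for n in nodes.values() if n.children is None]
    print(tree.nid, sorted(n.nid for n in leaves))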

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -42,14 +42,13 @@
                 rv.update(gf)
             if len(rv) == len(fields): return rv
             f = h5py.File(u(g.filename), "r")
-            gds = f["data"]
             for field in fields:
                 if field in rv:
                     self._hits += 1
                     continue
                 self._misses += 1
                 ftype, fname = field
-                rv[(ftype, fname)] = gds[fname].value
+                rv[(ftype, fname)] = f[ftype][fname].value
             if self._cache_on:
                 for gid in rv:
                     self._cached_fields.setdefault(gid, {})

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -136,6 +136,9 @@
             field_name = field[1]
         else:
             field_name = field
+        # thanks, python3
+        if data[field].dtype.kind == 'U':
+            data[field] = data[field].astype('|S40')
         _yt_array_hdf5(fh[field_type], field_name, data[field])
         if "num_elements" not in fh[field_type].attrs:
             fh[field_type].attrs["num_elements"] = data[field].size
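
The '|S40' cast works around h5py's inability (under Python 3) to store numpy
unicode ('U') arrays directly; converting to fixed-width bytes first keeps
the write from failing.  A quick illustration of the conversion:

    import numpy as np

    keys = np.array(["contour_key", "-1", "8"])   # dtype kind 'U' on Python 3
    if keys.dtype.kind == 'U':
        # Cast to fixed-width bytes before handing the array to h5py.
        keys = keys.astype('|S40')
    print(keys.dtype)   # |S40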

diff -r efbc6a51b1322148be1f956bc2e3b1f73a5b9198 -r ed78789acef4d8e51ecb4e5a40ee55ad31508d5d yt/utilities/tree_container.py
--- /dev/null
+++ b/yt/utilities/tree_container.py
@@ -0,0 +1,33 @@
+"""
+TreeContainer class and member functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class TreeContainer(object):
+    r"""A recursive data container for things like merger trees and
+    clump-finder trees.
+
+    """
+    _child_attr = "children"
+
+    def __init__(self):
+        setattr(self, self._child_attr, None)
+
+    def __iter__(self):
+        yield self
+        children = getattr(self, self._child_attr)
+        if children is None:
+            return
+        for child in children:
+            for a_node in child:
+                yield a_node
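
The recursive generator gives any TreeContainer subclass depth-first,
pre-order iteration for free.  A quick usage sketch against the class exactly
as added above:

    class TreeContainer(object):
        # As in yt/utilities/tree_container.py above.
        _child_attr = "children"

        def __init__(self):
            setattr(self, self._child_attr, None)

        def __iter__(self):
            yield self
            children = getattr(self, self._child_attr)
            if children is None:
                return
            for child in children:
                for a_node in child:
                    yield a_node

    root, a, b, c = (TreeContainer() for _ in range(4))
    root.children = [a, b]
    a.children = [c]
    print(len(list(root)))   # 4 nodes, visited in order: root, a, c, b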

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list