[yt-svn] commit/yt: 46 new changesets

commits-noreply at bitbucket.org
Fri May 12 08:02:33 PDT 2017


46 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/0889f90ec88d/
Changeset:   0889f90ec88d
Branch:      yt
User:        jzuhone
Date:        2017-02-27 15:18:56+00:00
Summary:     Move fnv_hash to its own Cython file so it may be used in multiple places
Affected #:  5 files

diff -r 25651334863bc48cb35ca88070de8aa8c5ffd855 -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,6 +37,7 @@
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
+yt/utilities/lib/fnv_hash.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c

diff -r 25651334863bc48cb35ca88070de8aa8c5ffd855 -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,6 +114,10 @@
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.fnv_hash",
+              ["yt/utilities/lib/fnv_hash.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=std_libs),
     Extension("yt.utilities.lib.geometry_utils",
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,

diff -r 25651334863bc48cb35ca88070de8aa8c5ffd855 -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -19,6 +19,7 @@
 cimport cython
 from cython cimport floating
 from libc.stdlib cimport malloc, free
+from yt.utilities.lib.fnv_hash cimport _fnv_hash as fnv_hash
 from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin, imin, imax
 from .oct_container cimport OctreeContainer, Oct
 cimport oct_visitors
@@ -45,16 +46,6 @@
 cdef np.float64_t grid_eps = np.finfo(np.float64).eps
 grid_eps = 0.0
 
-cdef np.int64_t fnv_hash(unsigned char[:] octets):
-    # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
-    # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
-    cdef np.int64_t hash_val = 2166136261
-    cdef char octet
-    for octet in octets:
-        hash_val = hash_val ^ octet
-        hash_val = hash_val * 16777619
-    return hash_val
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box

diff -r 25651334863bc48cb35ca88070de8aa8c5ffd855 -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 yt/utilities/lib/fnv_hash.pxd
--- /dev/null
+++ b/yt/utilities/lib/fnv_hash.pxd
@@ -0,0 +1,21 @@
+"""
+Definitions for fnv_hash
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import numpy as np
+cimport numpy as np
+
+cdef np.int64_t _fnv_hash(unsigned char[:] octets)

diff -r 25651334863bc48cb35ca88070de8aa8c5ffd855 -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 yt/utilities/lib/fnv_hash.pyx
--- /dev/null
+++ b/yt/utilities/lib/fnv_hash.pyx
@@ -0,0 +1,29 @@
+"""
+Fast hashing routines
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+
+cdef np.int64_t _fnv_hash(unsigned char[:] octets):
+    # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
+    # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
+    cdef np.int64_t hash_val = 2166136261
+    cdef char octet
+    for octet in octets:
+        hash_val = hash_val ^ octet
+        hash_val = hash_val * 16777619
+    return hash_val
+
+def fnv_hash(octets):
+    return _fnv_hash(octets)
\ No newline at end of file
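
For context, a minimal sketch (not part of the changeset) of calling the
relocated hash from pure Python; the module path and the fnv_hash() wrapper
are taken from the diff above:

    # Illustrative only: the FNV-1 hash now lives in its own module and is
    # callable from anywhere via the Python-level wrapper.
    from yt.utilities.lib.fnv_hash import fnv_hash

    # The underlying Cython routine takes an unsigned char[:] buffer, so a
    # writable bytes-like object such as a bytearray works:
    print(fnv_hash(bytearray(b"code_length")))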


https://bitbucket.org/yt_analysis/yt/commits/ad7e169ce0da/
Changeset:   ad7e169ce0da
Branch:      yt
User:        jzuhone
Date:        2017-02-27 18:08:32+00:00
Summary:     Create unit_system_id property for UnitRegistry that only gets set up when the Dataset class decides the unit registry is finished
Affected #:  1 file

diff -r 0889f90ec88d3f82a9302a2e243c8d822c740f80 -r ad7e169ce0da94ea0563635bbc7ff9cd9f02f895 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -18,6 +18,7 @@
 from distutils.version import LooseVersion
 from yt.units.unit_lookup_table import \
     default_unit_symbol_lut
+from yt.utilities.lib.fnv_hash import fnv_hash
 from yt.extern import six
 from sympy import \
     sympify, \
@@ -53,6 +54,17 @@
     def __contains__(self, item):
         return item in self.lut
 
+    _unit_system_id = None
+    @property
+    def unit_system_id(self):
+        if self._unit_system_id is None:
+            hash_data = bytearray()
+            for k, v in self.lut.items():
+                hash_data.extend(k.encode('ascii'))
+                hash_data.extend(repr(v).encode('ascii'))
+            self._unit_system_id = "code_unit_system_%d" % (-fnv_hash(hash_data))
+        return self._unit_system_id
+
     def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None):
         """
         Add a symbol to this registry.
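
A pure-Python sketch of what the new property computes (illustrative only:
the LUT entry is hypothetical, and the real code calls the Cython fnv_hash
from the previous changeset, whose signed 64-bit arithmetic can yield
negative values; the leading minus sign compensates, and a later changeset
in this series drops it):

    def fnv_hash(octets):
        # FNV-1 over a byte buffer, wrapped to 64 bits.
        hash_val = 2166136261
        for octet in octets:
            hash_val = ((hash_val ^ octet) * 16777619) & 0xFFFFFFFFFFFFFFFF
        return hash_val

    lut = {"code_length": (1.0, "(length)")}  # hypothetical registry entry
    hash_data = bytearray()
    for k, v in lut.items():
        hash_data.extend(k.encode('ascii'))
        hash_data.extend(repr(v).encode('ascii'))
    unit_system_id = "code_unit_system_%d" % (-fnv_hash(hash_data))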


https://bitbucket.org/yt_analysis/yt/commits/e15f3cf8095b/
Changeset:   e15f3cf8095b
Branch:      yt
User:        jzuhone
Date:        2017-02-27 18:10:03+00:00
Summary:     Refactor the unit system setup so that it uses the new code unit system id. This allows one to use "in_base('code')" instead of the more clunky "in_base(ds)". The latter is still accepted for backwards-compatibility.
Affected #:  3 files

diff -r ad7e169ce0da94ea0563635bbc7ff9cd9f02f895 -r e15f3cf8095b0e33b69ace1d079fb32b0d7d3daf yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -299,10 +299,10 @@
         self.no_cgs_equiv_length = False
 
         self._create_unit_registry()
-        self._assign_unit_system(unit_system)
 
         self._parse_parameter_file()
         self.set_units()
+        self._assign_unit_system(unit_system)
         self._setup_coordinate_handler()
 
         # Because we need an instantiated class to check the ds's existence in
@@ -918,9 +918,23 @@
         return self.refine_by**(l1-l0)
 
     def _assign_unit_system(self, unit_system):
-        create_code_unit_system(self)
+        current_mks_unit = None
+        if hasattr(self, 'magnetic_unit'):
+            # if the magnetic unit is in T, we need to create the code unit
+            # system as an MKS-like system
+            if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
+                if unit_system == "code":
+                    current_mks_unit = 'A'
+                elif unit_system == 'mks':
+                    pass
+                else:
+                    self.magnetic_unit = \
+                        self.magnetic_unit.to_equivalent('gauss', 'CGS')
+            self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        create_code_unit_system(self.unit_registry, 
+                                current_mks_unit=current_mks_unit)
         if unit_system == "code":
-            unit_system = str(self)
+            unit_system = self.unit_registry.unit_system_id
         else:
             unit_system = str(unit_system).lower()
         self.unit_system = unit_system_registry[unit_system]
@@ -993,21 +1007,6 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        if hasattr(self, 'magnetic_unit'):
-            # if the magnetic unit is in T, we need to recreate the code unit
-            # system as an MKS-like system
-            if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
-
-                if self.unit_system == unit_system_registry[str(self)]:
-                    unit_system_registry.pop(str(self))
-                    create_code_unit_system(self, current_mks_unit='A')
-                    self.unit_system = unit_system_registry[str(self)]
-                elif str(self.unit_system) == 'mks':
-                    pass
-                else:
-                    self.magnetic_unit = \
-                        self.magnetic_unit.to_equivalent('gauss', 'CGS')
-            self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         vel_unit = getattr(
             self, "velocity_unit", self.length_unit / self.time_unit)
         pressure_unit = getattr(

diff -r ad7e169ce0da94ea0563635bbc7ff9cd9f02f895 -r e15f3cf8095b0e33b69ace1d079fb32b0d7d3daf yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -475,10 +475,10 @@
                 raise YTUnitsNotReducible(self, "cgs")
             return yt_base_unit
         else:
-            if unit_system == "code":
-                raise RuntimeError(r'You must refer to a dataset instance to convert to a '
-                                   r'code unit system. Try again with unit_system=ds instead, '
-                                   r'where \'ds\' is your dataset.')
+            if hasattr(unit_system, "unit_registry"):
+                unit_system = unit_system.unit_registry.unit_system_id
+            elif unit_system == "code":
+                unit_system = self.registry.unit_system_id
             unit_system = unit_system_registry[str(unit_system)]
             units_string = _get_system_unit_string(self.dimensions, unit_system.base_units)
             u = Unit(units_string, registry=self.registry)

diff -r ad7e169ce0da94ea0563635bbc7ff9cd9f02f895 -r e15f3cf8095b0e33b69ace1d079fb32b0d7d3daf yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -40,8 +40,8 @@
     Parameters
     ----------
     name : string
-        The name of the unit system. Will be used as the key in the *unit_system_registry*
-        dict to reference the unit system by.
+        The name of the unit system. Will be used as the key in the 
+        *unit_system_registry* dict to reference the unit system by.
     length_unit : string
         The base length unit of this unit system.
     mass_unit : string
@@ -53,10 +53,11 @@
     angle_unit : string, optional
         The base angle unit of this unit system. Defaults to "rad".
     current_mks_unit : string, optional
-        The base current unit of this unit system. Only used in MKS or MKS-based unit systems.
+        The base current unit of this unit system. Only used in MKS 
+        or MKS-based unit systems.
     registry : :class:`yt.units.unit_registry.UnitRegistry` object
-        The unit registry associated with this unit system. Only useful for defining unit
-        systems based on code units.
+        The unit registry associated with this unit system. Only 
+        useful for defining unit systems based on code units.
     """
     def __init__(self, name, length_unit, mass_unit, time_unit,
                  temperature_unit="K", angle_unit="rad", current_mks_unit=None,
@@ -108,10 +109,11 @@
                 repr += "  %s: %s\n" % (key, self.units_map[dim])
         return repr
 
-def create_code_unit_system(ds, current_mks_unit=None):
-    code_unit_system = UnitSystem(
-        str(ds), "code_length", "code_mass", "code_time", "code_temperature",
-        current_mks_unit=current_mks_unit, registry=ds.unit_registry)
+def create_code_unit_system(unit_registry, current_mks_unit=None):
+    code_unit_system = UnitSystem(unit_registry.unit_system_id, "code_length", 
+                                  "code_mass", "code_time", "code_temperature",
+                                  current_mks_unit=current_mks_unit, 
+                                  registry=unit_registry)
     code_unit_system["velocity"] = "code_velocity"
     if current_mks_unit:
         code_unit_system["magnetic_field_mks"] = "code_magnetic"
@@ -136,7 +138,8 @@
 mks_unit_system["magnetic_field_mks"] = "T"
 mks_unit_system["charge_mks"] = "C"
 
-imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", temperature_unit="R")
+imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", 
+                                  temperature_unit="R")
 imperial_unit_system["force"] = "lbf"
 imperial_unit_system["energy"] = "ft*lbf"
 imperial_unit_system["pressure"] = "lbf/ft**2"
@@ -147,9 +150,11 @@
 
 solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr")
 
-geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom")
+geometrized_unit_system = UnitSystem("geometrized", "l_geom", 
+                                     "m_geom", "t_geom")
 
-planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", temperature_unit="T_pl")
+planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", 
+                                temperature_unit="T_pl")
 planck_unit_system["energy"] = "E_pl"
 planck_unit_system["charge_cgs"] = "q_pl"
 


https://bitbucket.org/yt_analysis/yt/commits/5bc2ea999295/
Changeset:   5bc2ea999295
Branch:      yt
User:        jzuhone
Date:        2017-02-27 18:10:25+00:00
Summary:     Update unit systems tests
Affected #:  1 file

diff -r e15f3cf8095b0e33b69ace1d079fb32b0d7d3daf -r 5bc2ea9992957d0bec77216cfdb6ee7aa0a85d8d yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -69,11 +69,7 @@
         ds = load(gslr, unit_system=us)
         dd = ds.sphere("c", (15.,"kpc"))
         for field in test_fields:
-            if us == "code":
-                # For this dataset code units are cgs
-                v1 = dd_cgs[field]
-            else:
-                v1 = dd_cgs[field].in_base(us)
+            v1 = dd_cgs[field].in_base(us)
             v2 = dd[field]
             assert_almost_equal(v1.v, v2.v)
             assert str(v2.units) == test_units[us][field]
@@ -88,7 +84,7 @@
 
     for us in test_units:
         ds = load(etc, unit_system=us)
-        dd = ds.sphere("max", (500.,"kpc"))
+        dd = ds.sphere("max", (500., "kpc"))
         for field in test_fields:
             if us == "code":
                 v1 = dd_cgs[field].in_units(test_units["code"][field])


https://bitbucket.org/yt_analysis/yt/commits/6a9c48bff300/
Changeset:   6a9c48bff300
Branch:      yt
User:        jzuhone
Date:        2017-02-27 19:49:19+00:00
Summary:     Don’t take the negative here
Affected #:  1 file

diff -r 5bc2ea9992957d0bec77216cfdb6ee7aa0a85d8d -r 6a9c48bff30093c5614610976086dc89f679b89b yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -62,7 +62,7 @@
             for k, v in self.lut.items():
                 hash_data.extend(k.encode('ascii'))
                 hash_data.extend(repr(v).encode('ascii'))
-            self._unit_system_id = "code_unit_system_%d" % (-fnv_hash(hash_data))
+            self._unit_system_id = "code_unit_system_%d" % fnv_hash(hash_data)
         return self._unit_system_id
 
     def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None):


https://bitbucket.org/yt_analysis/yt/commits/6b28977dbc64/
Changeset:   6b28977dbc64
Branch:      yt
User:        jzuhone
Date:        2017-02-27 19:58:46+00:00
Summary:     Don’t need this much text
Affected #:  1 file

diff -r 6a9c48bff30093c5614610976086dc89f679b89b -r 6b28977dbc64281286dd4841710b975e252dbf2f yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -62,7 +62,7 @@
             for k, v in self.lut.items():
                 hash_data.extend(k.encode('ascii'))
                 hash_data.extend(repr(v).encode('ascii'))
-            self._unit_system_id = "code_unit_system_%d" % fnv_hash(hash_data)
+            self._unit_system_id = "code_%d" % fnv_hash(hash_data)
         return self._unit_system_id
 
     def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None):


https://bitbucket.org/yt_analysis/yt/commits/ff193b815c5f/
Changeset:   ff193b815c5f
User:        jzuhone
Date:        2017-05-05 14:09:53+00:00
Summary:     Now the unit system doesn't exist yet, and this is not strictly necessary anyway
Affected #:  1 file

diff -r 6b28977dbc64281286dd4841710b975e252dbf2f -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -222,9 +222,6 @@
                 # comparisons are insufficient
                 unit = Unit(unit, registry=self.unit_registry)
                 if unit_name.endswith('_unit') and unit.dimensions is sympy_one:
-                    un = unit_name[:-5]
-                    un = un.replace('magnetic', 'magnetic_field', 1)
-                    unit = self.unit_system[un]
                     setdefaultattr(self, unit_name, self.quan(value, unit))
                 setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:


https://bitbucket.org/yt_analysis/yt/commits/5e3a51792b49/
Changeset:   5e3a51792b49
User:        jzuhone
Date:        2017-05-05 14:28:58+00:00
Summary:     Merge branch 'master' into code_units
Affected #:  238 files

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 .github/ISSUE_TEMPLATE.md
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,57 @@
+<!--To help us understand and resolve your issue, please fill out the form to
+the best of your ability.-->
+<!--You can feel free to delete the sections that do not apply.-->
+
+### Bug report
+
+**Bug summary**
+
+<!--A short 1-2 sentences that succinctly describes the bug-->
+
+**Code for reproduction**
+
+<!--A minimum code snippet required to reproduce the bug, also minimizing the
+number of dependencies required.-->
+
+<!-- If you need to use a data file to trigger the issue you're having, consider
+using one of the datasets from the yt data hub (http://yt-project.org/data). If
+your issue cannot be triggered using a public dataset, you can use the yt
+curldrop (https://docs.hub.yt/services.html#curldrop) to share data
+files. Please include a link to the dataset in the issue if you use the
+curldrop.-->
+
+```python
+# Paste your code here
+#
+#
+```
+
+**Actual outcome**
+
+<!--The output produced by the above code, which may be a screenshot, console
+output, etc.-->
+
+```
+# If applicable, paste the console output here
+#
+#
+```
+
+**Expected outcome**
+
+<!--A description of the expected outcome from the code snippet-->
+<!--If this used to work in an earlier version of yt, please note the
+version it used to work on-->
+
+**Version Information**
+<!--Please specify your platform and versions of the relevant libraries you are
+using:-->
+  * Operating System:
+  * Python Version:
+  * yt version:
+  * Other Libraries (if applicable): 
+
+<!--Please tell us how you installed yt and python e.g., from source,
+pip, conda. If you installed from conda, please specify which channel you used
+if not the default-->
+

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 .github/PULL_REQUEST_TEMPLATE.md
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,31 @@
+<!--Thank you so much for your PR! To help us review, fill out the form
+to the best of your ability.  Please make use of the development guide at
+http://yt-project.org/docs/dev/developing/index.html-->
+
+<!--Provide a general summary of your changes in the title above, for
+example "Raises ValueError on Non-Numeric Input to set_xlim".  Please avoid
+non-descriptive titles such as "Addresses issue #8576".-->
+
+<!--If you are able to do so, please do not create the PR out of master, but out
+of a separate branch. -->
+
+## PR Summary
+
+<!--Please provide at least 1-2 sentences describing the pull request in
+detail.  Why is this change required?  What problem does it solve?-->
+
+<!--If it fixes an open issue, please link to the issue here.-->
+
+## PR Checklist
+
+<!-- Note that some of these check boxes may not apply to all pull requests -->
+
+- [ ] Code passes flake8 checker
+- [ ] New features are documented, with docstrings and narrative docs
+- [ ] Adds a test for any bugs fixed. Adds tests for new features.
+
+<!--We understand that PRs can sometimes be overwhelming, especially as the
+reviews start coming in.  Please let us know if the reviews are unclear or the
+recommended next step seems overly demanding, or if you would like help in
+addressing a reviewer's comments.  And please ping us if you've been waiting
+too long to hear back on your PR.-->

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 .gitignore
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,87 @@
+build
+yt.egg-info
+__config__.py
+freetype.cfg
+hdf5.cfg
+png.cfg
+rockstar.cfg
+yt_updater.log
+yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
+yt/analysis_modules/ppv_cube/ppv_utils.c
+yt/analysis_modules/photon_simulator/utils.c
+yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
+yt/geometry/fake_octree.c
+yt/geometry/grid_container.c
+yt/geometry/grid_visitors.c
+yt/geometry/oct_container.c
+yt/geometry/oct_visitors.c
+yt/geometry/particle_deposit.c
+yt/geometry/particle_oct_container.c
+yt/geometry/particle_smooth.c
+yt/geometry/selection_routines.c
+yt/utilities/amr_utils.c
+yt/utilities/lib/autogenerated_element_samplers.c
+yt/utilities/kdtree/forthonf2c.h
+yt/utilities/libconfig_wrapper.c
+yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/allocation_container.c
+yt/utilities/lib/alt_ray_tracers.c
+yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
+yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
+yt/utilities/lib/distance_queue.c
+yt/utilities/lib/element_mappings.c
+yt/utilities/lib/fortran_reader.c
+yt/utilities/lib/freetype_writer.c
+yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
+yt/utilities/lib/image_utilities.c
+yt/utilities/lib/interpolators.c
+yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_triangulation.c
+yt/utilities/lib/mesh_utilities.c
+yt/utilities/lib/misc_utilities.c
+yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/primitives.c
+yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/pixelization_routines.c
+yt/utilities/lib/png_writer.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
+yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/cosmology_time.c
+yt/utilities/lib/grid_traversal.c
+yt/utilities/lib/marching_cubes.c
+yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
+*.pyc
+*.pyd
+.*.swp
+*.so
+.idea/*
+tests/results/*
+doc/build/*
+doc/source/reference/api/generated/*
+doc/_temp/*
+doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 .mailmap
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,89 @@
+Stephen Skory <s at skory.us> Stephen Skory <stephenskory at yahoo.com>
+Stephen Skory <s at skory.us> "Stephen Skory stephenskory at yahoo.com" <"Stephen Skory stephenskory at yahoo.com">
+Yuan Li <yuan at astro.columbia.edu> Yuan Li <bear0980 at gmail.com>
+Chris Moody <juxtaposicion at gmail.com> Chris Moody <cemoody at ucsc.edu>
+Chris Moody <juxtaposicion at gmail.com> Christopher Erick Moody <cemoody at ucsc.edu>
+Cameron Hummels <chummels at gmail.com> Cameron Hummels <chummels at astro.columbia.edu>
+Cameron Hummels <chummels at gmail.com> chummels <chummels at gmail.com>
+John Wise <jwise at physics.gatech.edu> John Wise <jwise at astro.princeton.edu>
+Sam Skillman <samskillman at gmail.com> Sam Skillman <sam.skillman at gmail.com>
+Sam Skillman <samskillman at gmail.com> samskillman <sam.skillman at gmail.com>
+Casey Stark <caseywstark at gmail.com> Casey Stark <casey at thestarkeffect.com>
+Christian Karch <chiffre at posteo.de> chiffre <none at none>
+Christian Karch <chiffre at posteo.de> Christian Karch <none at none>
+Andrew Myers <atmyers2 at gmail.com> Andrew Myers <atmyers at berkeley.edu>
+Andrew Myers <atmyers2 at gmail.com> atmyers <none at none>
+Douglas Rudd <drudd at uchicago.edu> drudd <none at none>
+Andrew Wetzel <andrew.wetzel at yale.edu> awetzel <none at none>
+David Collins <dcollins4096 at gmail.com> David Collins <dcollins at physics.ucsd.edu>
+David Collins <dcollins4096 at gmail.com> dcollins4096 <none at none>
+David Collins <dcollins4096 at gmail.com> David Collins (dcollins4096 at gmail.com) <David Collins (dcollins4096 at gmail.com)>
+Tom Abel <tabel at slac.stanford.edu> tabel <none at none>
+Kaylea Nelson <kaylea.nelson at yale.edu> sername=kayleanelson <none at none>
+Kaylea Nelson <kaylea.nelson at yale.edu> kayleanelson <none at none>
+Stephanie Tonnesen <stonnes at gmail.com> stonnes <stonnes at gmail.com>
+John Forbes <jcforbes at ucsc.edu> John Forbes <jforbes at ucolick.org>
+Elliott Biondo <biondo at wisc.edu> Elliott Biondo <Biondo at wisc.edu>
+Sam Geen <samgeen at gmail.com> Sam Geen <samgeen at googlemail.com>
+Alex Bogert <fbogert at ucsc.edu> fbogert <none at none>
+Brian O'Shea <oshea at msu.edu> bwoshea <none at none>
+Ji-hoon Kim <me at jihoonkim.org> Ji-hoon Kim <mornkr at slac.stanford.edu>
+Kirk Barrow <kssbarrow at gatech.edu> Kirk Barrow <kssbarrow at gmail.com>
+Kirk Barrow <kssbarrow at gatech.edu> Kirk Barrow <kassbarrow at gmail.com>
+Kirk Barrow <kssbarrow at gatech.edu> kbarrow <none at none>
+Kirk Barrow <kssbarrow at gatech.edu> kbarrow <kassbarrow at gmail.com>
+Antoine Strugarek <antoine.strugarek at cea.fr> Antoine Strugarek <strugarek at astro.umontreal.ca>
+Antoine Strugarek <antoine.strugarek at cea.fr> astrugarek <strugarek at astro.umontreal.ca>
+Antoine Strugarek <antoine.strugarek at cea.fr> Antoine Strugarek <antoine.strugarek at cea.fr>
+Anna Rosen <rosen at ucolick.org> Anna Rosen <alrosen at ucsc.edu>
+John ZuHone <jzuhone at gmail.com> jzuhone <none at none>
+John ZuHone <jzuhone at gmail.com> jzuhone <jzuhone at gmail.com>
+Kenza Arraki <karraki at nmsu.edu> Kenza Arraki <karraki at gmail.com>
+Allyson Julian <astrohckr at gmail.com> Allyson Julian <hckr at eml.cc>
+Allyson Julian <astrohckr at gmail.com> Allyson Julian <julian3 at illinois.edu>
+Allyson Julian <astrohckr at gmail.com> Allyson Julian <aj at hckr.eml.cc>
+Allyson Julian <astrohckr at gmail.com> AJ <astrohckr at gmail.com>
+Allyson Julian <astrohckr at gmail.com> Al Jul <hckr at eml.cc>
+Allyson Julian <astrohckr at gmail.com> AJ <aj at hckr.eml.cc>
+Allyson Julian <astrohckr at gmail.com> AJ <hckr at eml.cc>
+Brian Crosby <bcrosby.bd at gmail.com> bcrosby <bcrosby.bd at gmail.com>
+Ben Thompson <bthompson2090 at gmail.com> Ben Thompson <none at none>
+Ben Thompson <bthompson2090 at gmail.com> cosmosquark <none at none>
+Chris Malone <chris.m.malone at gmail.com> Chris Malone <chris.m.malone at lanl.gov>
+Chris Malone <chris.m.malone at gmail.com> ChrisMalone <chris.m.malone at gmail.com>
+Chris Malone <chris.m.malone at gmail.com> ChrisMalone <chris.m.malone at lanl.gov>
+Jill Naiman <jnaiman at cfa.harvard.edu> Jill Naiman <jnaiman at ucolick.org>
+Jill Naiman <jnaiman at cfa.harvard.edu> jnaiman <none at none>
+Jill Naiman <jnaiman at cfa.harvard.edu> jnaiman <jnaiman at ucolick.org>
+Ricarda Beckmann <ricarda.beckmann at astro.ox.ac.uk> RicardaBeckmann <ricarda.beckmann at astro.ox.ac.uk>
+Miguel de Val-Borro <miguel.deval at gmail.com> Miguel de Val-Borro <miguel at archlinux.net>
+Stuart Levy <salevy at illinois.edu> Stuart Levy <slevy at ncsa.illinois.edu>
+Ben Keller <malzraa at gmail.com> Ben Keller <kellerbw at mcmaster.ca>
+Daniel Fenn <df11c at my.fsu.edu> dfenn <none at none>
+Meagan Lang <langmm.astro at gmail.com> langmm <none at none>
+Joseph Tomlinson <jmtomlinson95 at gmail.com> jmt354 <none at none>
+Desika Narayanan <dnarayan at haverford.edu> desika <none at none>
+Nathan Goldbaum <ngoldbau at illinois.edu> Nathan Goldbaum <goldbaum at ucolick.org>
+Nathan Goldbaum <ngoldbau at illinois.edu> Nathan Goldbaum <ngoldbau at ucsc.edu>
+Fabian Koller <anokfireball at posteo.de> NTAuthority at honeypot.fritz.box <NTAuthority at honeypot.fritz.box>
+Fabian Koller <anokfireball at posteo.de> NTAuthority at guest053.fz-rossendorf.de <NTAuthority at guest053.fz-rossendorf.de>
+Fabian Koller <anokfireball at posteo.de> NTAuthority at guest692.fz-rossendorf.de <NTAuthority at guest692.fz-rossendorf.de>
+Fabian Koller <anokfireball at posteo.de> Fabian Koller <none at none>
+Rafael Ruggiero <rafael.ruggiero at usp.br> Rafael Ruggiero <none at none>
+John Regan <john.regan at helsinki.fi> John Regan <john.a.regan at durham.ac.uk>
+Axel Huebl <a.huebl at hzdr.de> Axel Huebl <code at andre-bubel.de>
+Gabriel Altay <gabriel.altay at gmail.com> galtay <gabriel.altay at gmail.com>
+Mark Richardson <Mark.Richardson.Work at gmail.com> Mark Richardson <Mark.L.Richardson at asu.edu>
+Jared Coughlin <jcoughl2 at nd.edu> Jared <none at none>
+Corentin Cadiou <corentin.cadiou at iap.fr> cphyc <contact at cphyc.me>
+Corentin Cadiou <corentin.cadiou at iap.fr> ccc at pingouin.local <ccc at pingouin.local>
+Corentin Cadiou <corentin.cadiou at iap.fr> ccc at pingouin-2.local <ccc at pingouin-2.local>
+Corentin Cadiou <corentin.cadiou at iap.fr> Corentin Cadiou <contact at cphyc.me>
+convert-repo <none at none> None <none at none>
+Patrick Shriwise <shriwise at wisc.edu> pshriwise <shriwise at wisc.edu>
+Michael Zingale <michael.zingale at stonybrook.edu> Michael  Zingale <michael.zingale at stonybrook.edu>
+Sam Leitner <sam.leitner at gmail.com> Sam  Leitner <sam.leitner at gmail.com>
+Geoffrey So <gsiisg at gmail.com> gsiisg <gsiisg at gmail.com>
+Matthew Turk <matthewturk at gmail.com> Matt Turk <matthewturk at gmail.com>
\ No newline at end of file

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@
    You can connect through our web
    gateway without any special client, at http://yt-project.org/irc.html .
    *IRC is the first stop for conversation!*
- * Many yt developers participate in the yt Slack community. Slack is a free 
+ * Many yt developers participate in the yt Slack community. Slack is a free
    chat service that many teams use to organize their work. You can get an
    invite to yt's Slack organization by clicking the "Join us @ Slack" button
    on this page: http://yt-project.org/community.html
@@ -54,18 +54,18 @@
 help out.
 
 The easiest way to help out is to fork the main yt repository (where the
-documentation lives in the ``doc`` directory in the root of the yt mercurial
+documentation lives in the ``doc`` directory in the root of the yt git
 repository) and then make your changes in your own fork.  When you are done,
 issue a pull request through the website for your new fork, and we can comment
 back and forth and eventually accept your changes. See :ref:`sharing-changes` for
-more information about contributing your changes to yt on bitbucket.
+more information about contributing your changes to yt on GitHub.
 
 Gallery Images and Videos
 -------------------------
 
 If you have an image or video you'd like to display in the image or video
 galleries, getting it included is easy!  You can either fork the `yt homepage
-repository <http://bitbucket.org/yt_analysis/website>`_ and add it there, or
+repository <http://github.com/yt-project/website>`_ and add it there, or
 email it to us and we'll add it to the `Gallery
 <http://yt-project.org/gallery.html>`_.
 
@@ -80,20 +80,20 @@
 bug fixes, new features, analysis modules, or a new code frontend.  See
 :ref:`creating_frontend` for more details.
 
-The process is pretty simple: fork on BitBucket, make changes, issue a pull
+The process is pretty simple: fork on GitHub, make changes, issue a pull
 request.  We can then go back and forth with comments in the pull request, but
 usually we end up accepting.
 
 For more information, see :ref:`contributing-code`, where we spell out how to
 get up and running with a development environment, how to commit, and how to
-use BitBucket.
+use GitHub.
 
 Online Presence
 ---------------
 
 Some of these fall under the other items, but if you'd like to help out with
 the website or any of the other ways yt is presented online, please feel free!
-Almost everything is kept in hg repositories on BitBucket, and it is very easy
+Almost everything is kept in git repositories on GitHub, and it is very easy
 to fork and contribute back changes.
 
 Please feel free to dig in and contribute changes.
@@ -210,19 +210,19 @@
 yt is a community project!
 
 We are very happy to accept patches, features, and bugfixes from any member of
-the community!  yt is developed using mercurial, primarily because it enables
-very easy and straightforward submission of changesets.  We're eager to hear
+the community!  yt is developed using git, primarily because it enables
+very easy and straightforward submission of revisions.  We're eager to hear
 from you, and if you are developing yt, we encourage you to subscribe to the
 `developer mailing list
 <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_. Please feel
 free to hack around, commit changes, and send them upstream.
 
-.. note:: If you already know how to use the `mercurial version control system
-   <http://mercurial-scm.org>`_ and are comfortable with handling it yourself,
-   the quickest way to contribute to yt is to `fork us on BitBucket
-   <http://bitbucket.org/yt_analysis/yt/fork>`_, make your changes, push the
+.. note:: If you already know how to use the `git version control system
+   <https://git-scm.com/>`_ and are comfortable with handling it yourself,
+   the quickest way to contribute to yt is to `fork us on GitHub
+   <https://github.com/yt-project/yt/fork>`_, make your changes, push the
    changes to your fork and issue a `pull request
-   <http://bitbucket.org/yt_analysis/yt/pull-requests>`_.  The rest of this
+   <https://github.com/yt-project/yt/pulls>`_.  The rest of this
    document is just an explanation of how to do that.
 
 See :ref:`code-style-guide` for more information about coding style in yt and
@@ -238,8 +238,8 @@
 -----------
 
 If you're interested in participating in yt development, take a look at the
-`issue tracker on bitbucket
-<https://bitbucket.org/yt_analysis/yt/issues?milestone=easy?status=new>`_.
+`issue tracker on GitHub
+<https://github.com/yt-project/yt/issues>`_.
 Issues are marked with a milestone of "easy", "moderate", or "difficult"
 depending on the estimated level of difficulty for fixing the issue. While we
 try to triage the issue tracker regularly, it may be the case that issues marked
@@ -248,9 +248,8 @@
 
 Here are some predefined issue searches that might be useful:
 
-* Unresolved issues `marked "easy" <https://bitbucket.org/yt_analysis/yt/issues?milestone=easy&status=open&status=new>`_.
-* Unresolved issues `marked "easy" or "moderate" <https://bitbucket.org/yt_analysis/yt/issues?milestone=easy&milestone=moderate&status=open&status=new>`_
-* `All unresolved issues <https://bitbucket.org/yt_analysis/yt/issues?status=open&status=new>`_
+* Unresolved issues `marked "easy" <https://github.com/yt-project/yt/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aopen%20milestone%3Aeasy>`_.
+* `All unresolved issues <https://github.com/yt-project/yt/issues>`_
 
 Submitting Changes
 ------------------
@@ -258,7 +257,7 @@
 We provide a brief introduction to submitting changes here.  yt thrives on the
 strength of its communities (http://arxiv.org/abs/1301.7064 has further
 discussion) and we encourage contributions from any user.  While we do not
-discuss version control, mercurial or the advanced usage of BitBucket in detail
+discuss version control, git, or the advanced usage of GitHub in detail
 here, we do provide an outline of how to submit changes and we are happy to
 provide further assistance or guidance.
 
@@ -275,48 +274,49 @@
 How To Get The Source Code For Editing
 ++++++++++++++++++++++++++++++++++++++
 
-yt is hosted on BitBucket, and you can see all of the yt repositories at
-http://bitbucket.org/yt_analysis/.  With the yt installation script you should have a
-copy of Mercurial for checking out pieces of code.  Make sure you have followed
+yt is hosted on GitHub, and you can see all of the yt repositories at
+https://github.com/yt-project/.  With the yt installation script you should have a
+copy of git for checking out pieces of code.  Make sure you have followed
 the steps above for bootstrapping your development (to assure you have a
-bitbucket account, etc.)
+GitHub account, etc.)
 
 In order to modify the source code for yt, we ask that you make a "fork" of the
-main yt repository on bitbucket.  A fork is simply an exact copy of the main
+main yt repository on GitHub.  A fork is simply an exact copy of the main
 repository (along with its history) that you will now own and can make
 modifications as you please.  You can create a personal fork by visiting the yt
-bitbucket webpage at https://bitbucket.org/yt_analysis/yt/ .  After logging in,
-you should see an option near the top right labeled "fork".  Click this option,
-and then click the fork repository button on the subsequent page.  You now have
+GitHub webpage at https://github.com/yt-project/yt/ .  After logging in,
+you should see an option near the top right labeled "fork". You now have
 a forked copy of the yt repository for your own personal modification.
 
-This forked copy exists on the bitbucket repository, so in order to access
-it locally, follow the instructions at the top of that webpage for that
-forked repository, namely run at a local command line:
+This forked copy exists on the GitHub repository, so in order to access
+it locally you must clone it onto your machine from the command line:
 
 .. code-block:: bash
 
-   $ hg clone http://bitbucket.org/<USER>/<REPOSITORY_NAME>
+   $ git clone https://github.com/<USER>/yt
 
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory. You should also run the following command, to make sure you are at
-the "yt" branch, and not other ones like "stable" (this will be important
-later when you want to submit your pull requests):
+directory.
 
 .. code-block:: bash
 
-   $ hg update yt
+   $ cd <REPOSITORY_NAME>
+
+Verify that you are on the master branch of yt by running:
 
-You can see any past state of the code by using the hg log command.
-For example, the following command would show you the last 5 changesets
+.. code-block:: bash
+
+   $ git branch
+
+You can see any past state of the code by using the git log command.
+For example, the following command would show you the last 5 revisions
 (modifications to the code) that were submitted to that repository.
 
 .. code-block:: bash
 
-   $ cd <REPOSITORY_NAME>
-   $ hg log -l 5
+   $ git log -n 5
 
 Using the revision specifier (the number or hash identifier next to each
 changeset), you can update the local repository to any past state of the
@@ -324,7 +324,7 @@
 
 .. code-block:: bash
 
-   $ hg up revision_specifier
+   $ git checkout revision_specifier
 
 Lastly, if you want to use this new downloaded version of your yt repository as
 the *active* version of yt on your computer (i.e. the one which is executed when
@@ -334,8 +334,7 @@
 
 .. code-block:: bash
 
-   $ cd <REPOSITORY_NAME>
-   $ python2.7 setup.py develop
+   $ python setup.py develop
 
 This will rebuild all C modules as well.
 
@@ -346,11 +345,11 @@
 
 If you just want to *look* at the source code, you may already have it on your
 computer.  If you build yt using the install script, the source is available at
-``$YT_DEST/src/yt-hg``.  See :ref:`source-installation` for more details about
+``$YT_DEST/src/yt-git``.  See :ref:`source-installation` for more details about
 to obtain the yt source code if you did not build yt using the install
 script.
 
-The root directory of the yt mercurial repository contains a number of
+The root directory of the yt git repository contains a number of
 subdirectories with different components of the code.  Most of the yt source
 code is contained in the yt subdirectory.  This directory itself contains
 the following subdirectories:
@@ -431,49 +430,31 @@
 rebuild yt.  If your changes have exclusively been to Python modules, you will
 not need to re-build, but (see below) you may need to re-install.
 
+Note that you will need a functioning compilation environment to build yt. On
+linux this typically means installing the package that sets up a basic build
+environment (e.g. ``build-essential`` on Debian and Ubuntu). On MacOS this means
+installing the XCode command line tools. On Windows this means installing the
+version of the Microsoft Visual C++ compiler that is appropriate for your
+version of Python. See `the Python wiki
+<https://wiki.python.org/moin/WindowsCompilers>`_ for more details.
+
 If you are running from a clone that is executable in-place (i.e., has been
 installed via the installation script or you have run ``setup.py develop``) you
 can rebuild these modules by executing:
 
 .. code-block:: bash
 
-  $ python2.7 setup.py develop
+  $ python setup.py develop
 
 If you have previously "installed" via ``setup.py install`` you have to
 re-install:
 
 .. code-block:: bash
 
-  $ python2.7 setup.py install
+  $ python setup.py install
 
 Only one of these two options is needed.
 
-.. _windows-developing:
-
-Developing yt on Windows
-------------------------
-
-If you plan to develop yt on Windows, it is necessary to use the `MinGW
-<http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
-installed from Anaconda as well. These can both be installed with a single command:
-
-.. code-block:: bash
-
-  $ conda install libpython mingw
-
-Additionally, the syntax for the setup command is slightly different; you must type:
-
-.. code-block:: bash
-
-  $ python2.7 setup.py build --compiler=mingw32 develop
-
-or
-
-.. code-block:: bash
-
-  $ python2.7 setup.py build --compiler=mingw32 install
-
 .. _requirements-for-code-submission:
 
 Requirements for Code Submission
@@ -513,68 +494,61 @@
 out with them.  In :ref:`code-style-guide` there is a list of handy tips for
 how to structure and write your code.
 
-.. _mercurial-with-yt:
+.. _git-with-yt:
 
-How to Use Mercurial with yt
-----------------------------
+How to Use git with yt
+----------------------
 
-If you're new to Mercurial, these three resources are pretty great for learning
+If you're new to git, the following resource is pretty great for learning
 the ins and outs:
 
-* http://hginit.com/
-* http://hgbook.red-bean.com/read/
-* http://mercurial-scm.org/
-* http://mercurial-scm.org/wiki
+* https://git-scm.com/
 
-The commands that are essential for using mercurial include:
+The commands that are essential for using git include:
 
-* ``hg help`` which provides help for any mercurial command. For example, you
-  can learn more about the ``log`` command by doing ``hg help log``. Other useful
-  topics to use with ``hg help`` are ``hg help glossary``, ``hg help config``,
-  ``hg help extensions``, and ``hg help revsets``.
-* ``hg commit`` which commits changes in the working directory to the
-  repository, creating a new "changeset object."
-* ``hg add`` which adds a new file to be tracked by mercurial.  This does
-  not change the working directory.
-* ``hg pull`` which pulls (from an optional path specifier) changeset
-  objects from a remote source.  The working directory is not modified.
-* ``hg push`` which sends (to an optional path specifier) changeset objects
-  to a remote source.  The working directory is not modified.
-* ``hg log`` which shows a log of all changeset objects in the current
-  repository.  Use ``-G`` to show a graph of changeset objects and their
-  relationship.
-* ``hg update`` which (with an optional "revision" specifier) updates the
-  state of the working directory to match a changeset object in the
-  repository.
-* ``hg merge`` which combines two changesets to make a union of their lines
-  of development.  This updates the working directory.
+* ``git <command> --help`` which provides help for any git command. For example, you
+  can learn more about the ``log`` command by doing ``git log --help``.
+* ``git add <paths>`` which stages changes to the specified paths for subsequent
+  committing (see below).
+* ``git commit`` which commits staged changes (stage using ``git add`` as above)
+  in the working directory to the repository, creating a new "revision."
+* ``git merge <branch>`` which merges the revisions from the specified branch
+  into the current branch, creating a union of their lines of development. This
+  updates the working directory.
+* ``git pull <remote> <branch>`` which pulls revisions from the specified branch of the
+  specified remote repository into the current local branch. Equivalent to ``git
+  fetch <remote>`` and then ``git merge <remote>/<branch>``. This updates the
+  working directory.
+* ``git push <remote>`` which sends revisions on local branches to matching
+  branches on the specified remote. ``git push <remote> <branch>`` will only
+  push changes for the specified branch.
+* ``git log`` which shows a log of all revisions on the current branch. There
+  are many options you can pass to ``git log`` to get additional
+  information. One example is ``git log --oneline --decorate --graph --all``.
 
-We are happy to asnswers questions about mercurial use on our IRC, slack
+We are happy to answer questions about git use on our IRC, slack
 chat or on the mailing list to walk you through any troubles you might have.
-Here are some general suggestions for using mercurial with yt:
+Here are some general suggestions for using git with yt:
 
-* Named branches are to be avoided.  Try using bookmarks (``see hg help
-  bookmark``) to track work.  (`More info about bookmarks is available on the
-  mercurial wiki <http://mercurial-scm.org/wiki/Bookmarks>`_)
-* Make sure you set a username in your ``~/.hgrc`` before you commit any
-  changes!  All of the tutorials above will describe how to do this as one of
-  the very first steps.
+* Although not necessary, a common development work flow is to create a local
+  named branch other than ``master`` to address a feature request or bugfix. If
+  the dev work addresses a specific yt GitHub issue, you may include that issue
+  number in the branch name. For example, if you want to work on issue number X
+  regarding a cool new slice plot feature, you might name the branch:
+  ``cool_new_plot_feature_X``. When you're ready to share your work, push your
+  feature branch to your remote and create a pull request to the ``master``
+  branch of the yt-project's repository.
 * When contributing changes, you might be asked to make a handful of
   modifications to your source code.  We'll work through how to do this with
   you, and try to make it as painless as possible.
 * Your test may fail automated style checks. See :ref:`code-style-guide` for
   more information about automatically verifying your code style.
-* Please avoid deleting your yt forks, as that deletes the pull request
-  discussion from process from BitBucket's website, even if your pull request
-  is merged.
 * You should only need one fork.  To keep it in sync, you can sync from the
-  website. See Bitbucket's `Blog Post
-  <https://blog.bitbucket.org/2013/02/04/syncing-and-merging-come-to-bitbucket/>`_
-  about this. See :ref:`sharing-changes` for a description of the basic workflow
+  website. See :ref:`sharing-changes` for a description of the basic workflow
   and :ref:`multiple-PRs` for a discussion about what to do when you want to
   have multiple open pull requests at the same time.
-* If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
-  list.
+* If you run into any troubles, stop by IRC (see :ref:`irc`), Slack, or the
+  mailing list.
 
 .. _sharing-changes:
 
@@ -583,30 +557,34 @@
 
 The simplest way to submit changes to yt is to do the following:
 
-* Build yt from the mercurial repository
+* Build yt from the git repository
 * Navigate to the root of the yt repository
 * Make some changes and commit them
-* Fork the `yt repository on BitBucket <https://bitbucket.org/yt_analysis/yt>`_
+* Fork the `yt repository on GitHub <https://github.com/yt-project/yt>`_
 * Push the changesets to your fork
 * Issue a pull request.
 
 Here's a more detailed flowchart of how to submit changes.
 
 #. If you have used the installation script, the source code for yt can be
-   found in ``$YT_DEST/src/yt-hg``.  Alternatively see
+   found in ``$YT_DEST/src/yt-git``.  Alternatively see
    :ref:`source-installation` for instructions on how to build yt from the
-   mercurial repository. (Below, in :ref:`reading-source`, we describe how to
+   git repository. (Below, in :ref:`reading-source`, we describe how to
    find items of interest.)
 #. Edit the source file you are interested in and
    test your changes.  (See :ref:`testing` for more information.)
-#. Fork yt on BitBucket.  (This step only has to be done once.)  You can do
-   this at: https://bitbucket.org/yt_analysis/yt/fork.  Call this repository
+#. Fork yt on GitHub.  (This step only has to be done once.)  You can do
+   this at: https://github.com/yt-project/yt/fork.  Call this repository
    yt.
-#. Create a bookmark to track your work. For example: ``hg bookmark
-   my-first-pull-request``
-#. Commit these changes, using ``hg commit``.  This can take an argument
-   which is a series of filenames, if you have some changes you do not want
-   to commit.
+#. Create a uniquely named branch to track your work. For example: ``git
+   checkout -b my-first-pull-request``
+#. Stage your changes using ``git add <paths>``.  This command takes an argument
+   which is a series of filenames whose changes you want to commit. After
+   staging, execute ``git commit -m "<Commit description>. Addresses Issue
+   #X"``. Note that supplying an actual GitHub issue # in place of ``X`` will
+   cause your commit to appear in the issue tracker after pushing to your
+   remote. This can be very helpful for others who are interested in what work
+   is being done in connection to that issue.
 #. Remember that this is a large development effort and to keep the code
    accessible to everyone, good documentation is a must.  Add in source code
    comments for what you are doing.  Add in docstrings
@@ -616,32 +594,36 @@
 #. If your changes include new functionality or cover an untested area of the
    code, add a test.  (See :ref:`testing` for more information.)  Commit
    these changes as well.
-#. Push your changes to your new fork using the command::
-
-      hg push -B my-first-pull-request https://bitbucket.org/YourUsername/yt/
+#. Add your remote repository with a unique name identifier. It can be anything;
+   your GitHub user name is one possible choice::
 
-   Where you should substitute the name of the bookmark you are working on for
-   ``my-first-pull-request``. If you end up doing considerable development, you
-   can set an alias in the file ``.hg/hgrc`` to point to this path.
+      git remote add <YourUniqueIdentifier> https://github.com/YourUsername/yt/
+  
+#. Push your changes to your remote fork using the unique identifier you just
+   created and the command::
+
+      git push <YourUniqueIdentifier> my-first-pull-request
+
+   Where you should substitute the name of the feature branch you are working on for
+   ``my-first-pull-request``.
 
    .. note::
      Note that the above approach uses HTTPS as the transfer protocol
-     between your machine and BitBucket.  If you prefer to use SSH - or
+     between your machine and GitHub.  If you prefer to use SSH - or
      perhaps you're behind a proxy that doesn't play well with SSL via
-     HTTPS - you may want to set up an `SSH key`_ on BitBucket.  Then, you use
-     the syntax ``ssh://hg@bitbucket.org/YourUsername/yt``, or equivalent, in
-     place of ``https://bitbucket.org/YourUsername/yt`` in Mercurial commands.
+     HTTPS - you may want to set up an `SSH key`_ on GitHub.  Then, you use
+     the syntax ``ssh://git@github.com/YourUsername/yt``, or equivalent, in
+     place of ``https://github.com/YourUsername/yt`` in git commands.
      For consistency, all commands we list in this document will use the HTTPS
      protocol.
 
-     .. _SSH key: https://confluence.atlassian.com/display/BITBUCKET/Set+up+SSH+for+Mercurial
+     .. _SSH key: https://help.github.com/articles/connecting-to-github-with-ssh/
 
 #. Issue a pull request at
-   https://bitbucket.org/YourUsername/yt/pull-request/new
+   https://github.com/yt-project/yt/pull/new/master
    A pull request is essentially just asking people to review and accept the
    modifications you have made to your personal version of the code.
 
-
 During the course of your pull request you may be asked to make changes.  These
 changes may be related to style issues, correctness issues, or even requesting
 tests.  The process for responding to pull request code review is relatively
@@ -650,98 +632,23 @@
 #. Make requested changes, or leave a comment indicating why you don't think
    they should be made.
 #. Commit those changes to your local repository.
-#. Push the changes to your fork:
+#. Push the changes to your fork::
 
-      hg push https://bitbucket.org/YourUsername/yt/
+      git push <YourRemoteIdentifier> <YourBranchName>
 
 #. Your pull request will be automatically updated.
 
 .. _multiple-PRs:
 
-Working with Multiple BitBucket Pull Requests
----------------------------------------------
-
-Once you become active developing for yt, you may be working on
-various aspects of the code or bugfixes at the same time.  Currently,
-BitBucket's *modus operandi* for pull requests automatically updates
-your active pull request with every ``hg push`` of commits that are a
-descendant of the head of your pull request.  In a normal workflow,
-this means that if you have an active pull request, make some changes
-locally for, say, an unrelated bugfix, then push those changes back to
-your fork in the hopes of creating a *new* pull request, you'll
-actually end up updating your current pull request!
-
-There are a few ways around this feature of BitBucket that will allow
-for multiple pull requests to coexist; we outline one such method
-below.  We assume that you have a fork of yt at
-``http://bitbucket.org/YourUsername/Your_yt`` (see
-:ref:`sharing-changes` for instructions on creating a fork) and that
-you have an active pull request to the main repository.
-
-The main issue with starting another pull request is to make sure that
-your push to BitBucket doesn't go to the same head as your
-existing pull request and trigger BitBucket's auto-update feature.
-Here's how to get your local repository away from your current pull
-request head using `revsets <http://www.selenic.com/hg/help/revsets>`_
-and your ``hgrc`` file:
-
-#. Set up a Mercurial path for the main yt repository (note this is a convenience
-   step and only needs to be done once).  Add the following to your
-   ``Your_yt/.hg/hgrc``::
-
-     [paths]
-     upstream = https://bitbucket.org/yt_analysis/yt
-
-   This will create a path called ``upstream`` that is aliased to the URL of the
-   main yt repository.
-#. Now we'll use revsets_ to update your local repository to the tip of the
-   ``upstream`` path:
-
-   .. code-block:: bash
+Working with Multiple GitHub Pull Requests
+------------------------------------------
 
-      $ hg pull upstream
-      $ hg update -r "remote(yt, 'upstream')"
-
-After the above steps, your local repository should be at the current head of
-the ``yt`` branch in the main yt repository.  If you find yourself doing this a
-lot, it may be worth aliasing this task in your ``hgrc`` file by adding
-something like::
-
-  [alias]
-  ytupdate = update -r "remote(yt, 'upstream')"
-
-And then you can just issue ``hg ytupdate`` to get at the current head of the
-``yt`` branch on main yt repository.
-
-Make sure you are on the branch you want to be on, and then you can make changes
-and ``hg commit`` them.  If you prefer working with `bookmarks
-<http://mercurial-scm.org/wiki/Bookmarks>`_, you may want to make a bookmark
-before committing your changes, such as ``hg bookmark mybookmark``.
-
-To push your changes on a bookmark to bitbucket, you can issue the following
-command:
-
-.. code-block:: bash
-
-    $ hg push -B myfeature https://bitbucket.org/YourUsername/Your_yt
-
-The ``-B`` means "publish my bookmark, the changeset the bookmark is pointing
-at, and any ancestors of that changeset that aren't already on the remote
-server".
-
-To push to your fork on BitBucket if you didn't use a bookmark, you issue the
-following:
-
-.. code-block:: bash
-
-  $ hg push -r . -f https://bitbucket.org/YourUsername/Your_yt
-
-The ``-r .`` means "push only the commit I'm standing on and any ancestors."
-The ``-f`` is to force Mecurial to do the push since we are creating a new
-remote head without a bookmark.
-
-You can then go to the BitBucket interface and issue a new pull request based on
-your last changes, as usual.
+Dealing with multiple pull requests on GitHub is straightforward. Development on
+one feature should be isolated in one named branch, say ``feature_1``, while
+development of another feature should be in another named branch, say
+``feature_2``. A push to remote ``feature_1`` will automatically update any
+active PR for which ``feature_1`` is a pointer to the ``HEAD`` commit. A push to
+``feature_1`` *will not* update any pull requests involving ``feature_2``.
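+
+A minimal GitPython sketch of this two-branch discipline, assuming a clone
+with your fork configured as the remote ``origin`` (all branch names here are
+placeholders):
+
+.. code-block:: python
+
+   import git
+
+   repo = git.Repo('.')
+   # keep each feature on its own branch, cut from master
+   repo.git.checkout('master')
+   repo.git.checkout('-b', 'feature_2')
+   # ... commit work for the second feature here ...
+   # this push only touches the PR backed by feature_2
+   repo.remotes.origin.push('feature_2')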
 
 .. _code-style-guide:
 
@@ -767,7 +674,7 @@
 
 .. code-block:: bash
 
-    $ cd $YT_HG
+    $ cd <REPOSITORY_NAME>
     $ flake8 ./yt
 
 This will print out any ``flake8`` errors or warnings that your newly added code

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/Makefile
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -33,20 +33,24 @@
 	@echo "  linkcheck   to check all external links for integrity"
 	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"
 	@echo "  clean 	     to remove the build directory"
-	@echo "  fullclean   to remove the build directory and autogenerated api docs"
 	@echo "  recipeclean to remove files produced by running the cookbook scripts"
 
 clean:
 	-rm -rf $(BUILDDIR)/*
+	-rm -rf source/reference/api/yt.*
+	-rm -rf source/reference/api/modules.rst
 
-fullclean:
-	-rm -rf $(BUILDDIR)/*
-	-rm -rf source/reference/api/generated
+fullclean: clean
 
 recipeclean:
 	-rm -rf _temp/*.done source/cookbook/_static/*
 
 html:
+ifneq ($(READTHEDOCS),True)
+	SPHINX_APIDOC_OPTIONS=members,undoc-members,inherited-members,show-inheritance sphinx-apidoc \
+        -o source/reference/api/ \
+        -e ../yt ../yt/extern/* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp*
+endif
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	@echo
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/extensions/pythonscript_sphinxext.py
--- a/doc/extensions/pythonscript_sphinxext.py
+++ b/doc/extensions/pythonscript_sphinxext.py
@@ -1,4 +1,5 @@
 import tempfile
+import time
 import os
 import glob
 import shutil
@@ -37,12 +38,16 @@
             f.write(content)
 
         # Use sphinx logger?
+        uid = uuid.uuid4().hex[:8]
         print("")
+        print(">> Contents of the script: %s" % uid)
         print(content)
         print("")
 
+        start = time.time()
         subprocess.call(['python', 'temp.py'])
-
+        print(">> The execution of the script %s took %f s" %
+              (uid, time.time() - start))
         text = ''
         for im in sorted(glob.glob("*.png")):
             text += get_image_tag(im, image_dir, image_rel_dir)

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/extensions/yt_cookbook.py
--- a/doc/extensions/yt_cookbook.py
+++ b/doc/extensions/yt_cookbook.py
@@ -23,7 +23,7 @@
 
     return retdict
 
-data_patterns = ["*.h5", "*.out", "*.dat"]
+data_patterns = ["*.h5", "*.out", "*.dat", "*.mp4"]
 
 class CookbookScript(Directive):
     required_arguments = 1

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/extensions/yt_showfields.py
--- /dev/null
+++ b/doc/extensions/yt_showfields.py
@@ -0,0 +1,32 @@
+import subprocess
+import sys
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('yt_showfields', ShowFields)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+    
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class ShowFields(Directive):
+    required_arguments = 0
+    optional_arguments = 0
+    parallel_read_safe = True
+    parallel_write_safe = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        lines = subprocess.check_output(
+            [sys.executable, './helper_scripts/show_fields.py'])
+        lines = lines.decode('utf8')
+        lines = lines.split('\n')
+        self.state_machine.insert_input(lines, rst_file)
+        return []

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -12,7 +12,7 @@
 from multiprocessing import Pool
 from yt.config import ytcfg
 
-FPATTERNS = ['*.png', '*.txt', '*.h5', '*.dat']
+FPATTERNS = ['*.png', '*.txt', '*.h5', '*.dat', '*.mp4']
 DPATTERNS = ['LC*', 'LR', 'DD0046']
 BADF = ['cloudy_emissivity.h5', 'apec_emissivity.h5',
         'xray_emissivity.h5', 'AMRGridData_Slice_x_density.png']

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -1,17 +1,14 @@
 import inspect
-from yt.mods import *
-from yt.testing import *
+from yt.testing import fake_random_ds
 import numpy as np
 from yt.utilities.cosmology import \
      Cosmology
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
 from yt.frontends.stream.fields import \
     StreamFieldInfo
 from yt.frontends.api import _frontends
 from yt.fields.derived_field import NullFunc
 import yt.frontends as frontends_module
-from yt.units.yt_array import YTArray, Unit
+from yt.units.yt_array import Unit
 from yt.units import dimensions
 
 fields, units = [], []
@@ -35,10 +32,11 @@
         return field
     return field[1]
 
+
 np.random.seed(int(0x4d3d3d3))
 units = [base_ds._get_field_info(*f).units for f in fields]
 fields = [_strip_ftype(f) for f in fields]
-ds = fake_random_ds(16, fields=fields, units=units, particles=True)
+ds = fake_random_ds(16, fields=fields, units=units, particles=1)
 ds.parameters["HydroMethod"] = "streaming"
 ds.parameters["EOSType"] = 1.0
 ds.parameters["EOSSoundSpeed"] = 1.0
@@ -160,6 +158,7 @@
                 print("  " + line)
             print()
 
+
 ds.index
 print_all_fields(ds.field_info)
 
@@ -183,6 +182,7 @@
         self.name = "(%s, '%s')" % (ftype, name)
         self.ptype = ptype
 
+
 current_frontends = [f for f in _frontends if f not in ["stream"]]
 
 for frontend in current_frontends:
@@ -194,7 +194,7 @@
         # Drop duplicate entry for GadgetHDF5, add special case for FieldInfo
         # entry
         dataset_names = ['GadgetDataset']
-        field_info_names = ['SPHFieldInfo']
+        field_info_names = ['GadgetFieldInfo']
     elif frontend == "boxlib":
         field_info_names = []
         for d in dataset_names:

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -30,7 +30,7 @@
 # INST_CONDA=0
 # INST_YT_SOURCE=1
 
-BRANCH="yt" # This is the branch to which we will forcibly update.
+BRANCH="master" # This is the branch we will install from for source installs
 
 # What follows are some other options that you may or may not need to change.
 
@@ -44,11 +44,12 @@
 
 # These options can be set to customize the installation.
 
-INST_PY3=0          # Install Python 3 along with Python 2. If this is turned
+INST_PY3=1          # Install Python 3 instead of Python 2. If this is turned
                     # on, all Python packages (including yt) will be installed
-                    # in Python 3 (except Mercurial, which requires Python 2).
-INST_HG=1           # Install Mercurial or not?  If hg is not already
-                    # installed, yt cannot be installed from source.
+                    # in Python 3.
+INST_GIT=1          # Install git or not?  If git is not already installed, yt
+                    # cannot be installed from source. Ignored if INST_CONDA=0
+INST_HG=0           # Install Mercurial or not? Ignored if INST_CONDA=0.
 INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
                     # ray tracing
 
@@ -148,6 +149,10 @@
         echo "Please set INST_YT_SOURCE to 1 and re-run."
         exit 1
     fi
+    if [ $INST_GIT -eq 1 ]
+    then
+        INST_GIT=0
+    fi
     DEST_SUFFIX="yt-`uname -m`"
 fi
 
@@ -200,7 +205,7 @@
 {
     CONFIG_FILE=${DEST_DIR}/.yt_config
 
-    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_GIT=${INST_GIT} > ${CONFIG_FILE}
     echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
     echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
     echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
@@ -397,9 +402,10 @@
         echo "  * gcc-{,c++,gfortran}"
         echo "  * make"
         echo "  * patch"
+        echo "  * git"
         echo
         echo "You can accomplish this by executing:"
-        echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
+        echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip git"
         echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
     fi
     if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
@@ -413,11 +419,12 @@
         echo "  * libuuid-devel"
         echo "  * zip"
         echo "  * gcc-c++"
+        echo "  * git"
         echo
         echo "You can accomplish this by executing:"
         echo
         echo "$ sudo zypper install -t pattern devel_C_C++"
-        echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+        echo "$ sudo zypper install git-core gcc-c++ libopenssl-devel libuuid-devel zip"
         echo
         echo "I am also setting special configure arguments to Python to"
         echo "specify control lib/lib64 issues."
@@ -437,10 +444,11 @@
         echo "  * uuid-dev"
         echo "  * libfreetype6-dev"
         echo "  * tk-dev"
+        echo "  * git"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev git"
         echo
         echo
         echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
@@ -566,9 +574,9 @@
 get_willwont ${INST_PY3}
 echo "be installing Python 3"
 
-printf "%-18s = %s so I " "INST_HG" "${INST_HG}"
-get_willwont ${INST_HG}
-echo "be installing Mercurial"
+printf "%-18s = %s so I " "INST_GIT" "${INST_GIT}"
+get_willwont ${INST_GIT}
+echo "be installing git"
 
 printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
 get_willwont ${INST_EMBREE}
@@ -640,10 +648,12 @@
 echo "If you'd rather stop, maybe think things over, even grab a sandwich, "
 echo "hit Ctrl-C."
 echo
+
 if [ $INST_YT_SOURCE -ne 0 ]
 then
    host_specific
 fi
+
 if [ $INST_CONDA -eq 0 ]
 then
     if [ ${USED_CONFIG} ]
@@ -679,7 +689,7 @@
 
 if [ $INST_PY3 -eq 1 ]
 then
-     PYTHON_EXEC='python3.5'
+     PYTHON_EXEC='python3'
 else 
      PYTHON_EXEC='python2.7'
 fi
@@ -698,12 +708,7 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
-    if [[ $LIB =~ .*mercurial.* ]]
-    then
-        PYEXE="python2.7"
-    else
-        PYEXE=${PYTHON_EXEC}
-    fi
+    PYEXE=${PYTHON_EXEC}
     case $LIB in
         *h5py*)
             pushd $LIB &> /dev/null
@@ -836,10 +841,9 @@
     LAPACK='lapack-3.4.2'
     PNG='libpng-1.6.3'
     MATPLOTLIB='matplotlib-1.5.1'
-    MERCURIAL='mercurial-3.7.3'
     NOSE='nose-1.3.7'
     NUMPY='numpy-1.11.0'
-    PYTHON_HGLIB='python-hglib-2.0'
+    GITPYTHON='GitPython-2.1.3'
     ROCKSTAR='rockstar-0.99.6'
     SCIPY='scipy-0.17.0'
     SQLITE='sqlite-autoconf-3071700'
@@ -867,10 +871,9 @@
     echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
     echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
     echo 'a0e78b5027a3a49cf8e77dc0d26f5f380dcd80f7b309b6121199acd5e1d94f48482864a9eee3bd397f7ac6f07fe1d3c21bf517217df3c72e8e3d105b7c2ae58e  matplotlib-1.5.1.tar.gz' > matplotlib-1.5.1.tar.gz.sha512
-    echo '7f9f97229e40c7092c16ccf227b19a08a9839d8ce19a9d057341fff75876bff32241ee9aa10eab293f779ea3e8a1d97577597187bd96251fb499cbb1075a82cf  mercurial-3.7.3.tar.gz' > mercurial-3.7.3.tar.gz.sha512
     echo 'e65c914f621f8da06b9ab11a0ff2763d6e29b82ce2aaed56da0e3773dc899d9deb1f20015789d44c65a5dad7214520f5b659b3f8d7695fb207ad3f78e5cf1b62  nose-1.3.7.tar.gz' > nose-1.3.7.tar.gz.sha512
     echo '92c1889397ad013e25da3a0657fc01e787d528fc19c29cc2acd286c3f07d41b984252583457b1b9259fc303afbe9694565cdcf5752eb4ecb950cc7a99ec1ad8b  numpy-1.11.0.tar.gz' > numpy-1.11.0.tar.gz.sha512
-    echo '647cc82424783efc3d74540e34976af66acc35fc36d66afba169508946cc62027910c7e41dc9d11ec88c15d6b1e113ce22c2781711ea324de58db3b24d5079c4  python-hglib-2.0.tar.gz' > python-hglib-2.0.tar.gz.sha512
+    echo '918ff1765a85a818619165c2bcbb0d417f35c979c2f42f1bb7e41636696c0cb4d6837725f3655fbdfebea966476d1255ee18adabe9ed5536455b63336a1f399d  GitPython-2.1.3.tar.gz' > GitPython-2.1.3.tar.gz.sha512
     echo 'de6409d75a3ff3cf1e5391d3b09126f0bc7e1a40a15f9bee244195638fe2f8481fca032896d8534623e6122ff59aaf669664e27ff89cf1b094a5ce7312f220b7  scipy-0.17.0.tar.gz' > scipy-0.17.0.tar.gz.sha512
     echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
     echo '977db6e9bc6a5918cceb255981a57e85e7060c0922aefd2968b004d25d704e25a5cb5bbe09eb387e8695581e23e2825d9c40310068fe25ece7e9c23037a21f39  sympy-1.0.tar.gz' > sympy-1.0.tar.gz.sha512
@@ -887,16 +890,19 @@
     [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
     [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
     [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-    [ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
-    [ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+    if [ $INST_PY3 -eq 1 ]
+    then
+        get_ytproject $PYTHON3.tgz
+    else
+        get_ytproject $PYTHON2.tgz
+    fi
     [ $INST_H5PY -eq 1 ] && get_ytproject $H5PY.tar.gz
     [ $INST_NOSE -eq 1 ] && get_ytproject $NOSE.tar.gz
     [ $INST_ASTROPY -eq 1 ] && get_ytproject $ASTROPY.tar.gz
-    get_ytproject $PYTHON2.tgz
     get_ytproject $NUMPY.tar.gz
     get_ytproject $MATPLOTLIB.tar.gz
     get_ytproject $CYTHON.tar.gz
-    get_ytproject $PYTHON_HGLIB.tar.gz
+    get_ytproject $GITPYTHON.tar.gz
     get_ytproject $SYMPY.tar.gz
     get_ytproject $SETUPTOOLS.tar.gz
 
@@ -936,7 +942,7 @@
             cd $ZLIB
             ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
             ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-            ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+            ( make clean 2>&1 ) 1>> ${LOG_FILE} || do_exit
             touch done
             cd ..
         fi
@@ -1016,37 +1022,11 @@
         fi
     fi
 
-    if [ ! -e $PYTHON2/done ]
-    then
-        echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
-        [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
-        cd $PYTHON2
-        ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        
-        ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
-        touch done
-        cd ..
-    fi
-
-    ( ${DEST_DIR}/bin/python -c "import _ssl" 2>&1 ) 1>> ${LOG_FILE}
-    RESULT=$?
-    if  [ $RESULT -ne 0 ]
-    then
-        echo "Unable to import the python SSL bindings."
-        echo "This means that OpenSSL is not installed or your system's OpenSSL"
-        echo "installation is out of date."
-        echo "Please install OpenSSL or set INST_CONDA=1"
-        do_exit
-    fi
-
     if [ $INST_PY3 -eq 1 ]
     then
         if [ ! -e $PYTHON3/done ]
         then
-            echo "Installing Python 3. Because two Pythons are better than one."
+            echo "Installing Python 3"
             [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
             cd $PYTHON3
             ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -1060,6 +1040,31 @@
             touch done
             cd ..
         fi
+    else
+        if [ ! -e $PYTHON2/done ]
+        then
+            echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+            [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+            cd $PYTHON2
+            ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+            ( make clean 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            touch done
+            cd ..
+        fi
+    fi
+
+    ( ${DEST_DIR}/bin/python -c "import _ssl" 2>&1 ) 1>> ${LOG_FILE}
+    RESULT=$?
+    if  [ $RESULT -ne 0 ]
+    then
+        echo "Unable to import the python SSL bindings."
+        echo "This means that OpenSSL is not installed or your system's OpenSSL"
+        echo "installation is out of date."
+        echo "Please install OpenSSL or set INST_CONDA=1"
+        do_exit
     fi
 
     export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
@@ -1067,19 +1072,12 @@
     # Install setuptools
     do_setup_py $SETUPTOOLS
 
-    if [ $INST_HG -eq 1 ]
+    if type -p git &>/dev/null
     then
-        do_setup_py $MERCURIAL
-        export HG_EXEC=${DEST_DIR}/bin/hg
+        GIT_EXE="git"
     else
-        # We assume that hg can be found in the path.
-        if type -P hg &>/dev/null
-        then
-            export HG_EXEC=hg
-        else
-            echo "Cannot find mercurial.  Please set INST_HG=1."
-            do_exit
-        fi
+        echo "Cannot find git. Please install git."
+        do_exit
     fi
 
     if [ -z "$YT_DIR" ]
@@ -1090,20 +1088,14 @@
         elif [ -e $ORIG_PWD/../yt/mods.py ]
         then
             YT_DIR=`dirname $ORIG_PWD`
-        elif [ ! -e yt-hg ]
+        elif [ ! -e yt-git ]
         then
             echo "Cloning yt"
-            YT_DIR="$PWD/yt-hg/"
-            ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
-            # Recently the hg server has had some issues with timeouts.  In lieu of
-            # a new webserver, we are now moving to a three-stage process.
-            # First we clone the repo, but only up to r0.
-            ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
-            # Now we update to the branch we're interested in.
-            ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-        elif [ -e yt-hg ]
+            YT_DIR="$PWD/yt-git/"
+            ( ${GIT_EXE} clone https://github.com/yt-project/yt/ ${YT_DIR} 2>&1 ) 1>> ${LOG_FILE}
+        elif [ -e yt-git ]
         then
-            YT_DIR="$PWD/yt-hg/"
+            YT_DIR="$PWD/yt-git/"
         fi
         echo Setting YT_DIR=${YT_DIR}
     fi
@@ -1206,7 +1198,7 @@
     then
         do_setup_py $ASTROPY
     fi
-    do_setup_py $PYTHON_HGLIB
+    do_setup_py $GITPYTHON
     do_setup_py $SYMPY
     [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
@@ -1220,11 +1212,9 @@
             echo "Building Rockstar"
             if [ ! -e rockstar ]
             then
-                ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+                ( ${GIT_EXE} clone https://github.com/yt-project/rockstar 2>&1 ) 1>> ${LOG_FILE}
             fi
             cd rockstar
-            ( hg pull 2>&1 ) 1>> ${LOG_FILE}
-            ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
             ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
             cp librockstar.so ${DEST_DIR}/lib
             ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
@@ -1234,10 +1224,9 @@
         fi
     fi
     
-    echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
     MY_PWD=`pwd`
     cd $YT_DIR
-    ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+    ( ${GIT_EXE} pull 2>&1 && ${GIT_EXE} checkout ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
     echo "Installing yt"
     [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
@@ -1300,14 +1289,6 @@
         echo
         echo "The source for yt is located at:"
         echo "    $YT_DIR"
-        if [ $INST_HG -eq 1 ]
-        then
-            echo
-            echo "Mercurial has also been installed:"
-            echo
-            echo "$DEST_DIR/bin/hg"
-            echo
-        fi
         echo
         echo "For support, see the website and join the mailing list:"
         echo
@@ -1399,6 +1380,11 @@
     YT_DEPS+=('jupyter')
     YT_DEPS+=('ipython')
     YT_DEPS+=('sphinx')
+    if [ ${INST_GIT} -eq 1 ]
+    then
+        YT_DEPS+=('git')
+        YT_DEPS+=('gitpython')
+    fi
     if [ $INST_H5PY -ne 0 ]
     then
         YT_DEPS+=('h5py')
@@ -1418,7 +1404,7 @@
         YT_DEPS+=('astropy')
     fi
     YT_DEPS+=('conda-build')
-    if [ $INST_PY3 -eq 0 ]
+    if [ $INST_PY3 -eq 0 ] && [ $INST_HG -eq 1 ]
     then
        YT_DEPS+=('mercurial')
     fi
@@ -1431,22 +1417,22 @@
     
     log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
+    GIT_EXE=${DEST_DIR}/bin/git
+
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes ${YT_DEP}
     done
 
-    if [ $INST_PY3 -eq 1 ]
+    if [ $INST_PY3 -eq 1 ] && [ $INST_HG -eq 1 ]
     then
         echo "Installing mercurial"
         log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd ${DEST_DIR}/bin/pip install python-hglib
-
-    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${GIT_EXE} clone https://github.com/yt-project/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1480,7 +1466,7 @@
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ( ${GIT_EXE} clone https://github.com/yt-project/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
         ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
         log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
         log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
@@ -1504,9 +1490,9 @@
         log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
-        YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        log_cmd ${DEST_DIR}/bin/hg -R ${YT_DIR} up -C ${BRANCH}
+        YT_DIR="${DEST_DIR}/src/yt-git"
+        log_cmd ${GIT_EXE} clone https://github.com/yt-project/yt ${YT_DIR}
+        log_cmd ${GIT_EXE} -C ${YT_DIR} checkout ${BRANCH}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -16,7 +16,7 @@
 it has grown to handle any kind of data represented in a 2D or 3D volume.
 yt is a Python-based open source project and is open for anyone to use or
 contribute code.  The entire source code and history is available to all
-at https://bitbucket.org/yt_analysis/yt .
+at http://github.com/yt-project/yt .
 
 .. _who-is-yt:
 
@@ -31,7 +31,7 @@
 `our members website. <http://yt-project.org/members.html>`_
 
 For an up-to-date list of everyone who has contributed to the yt codebase,
-see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.
+see the current `CREDITS <https://github.com/yt-project/yt/blob/master/CREDITS>`_ file.
 For a more detailed breakdown of contributions made by individual users, see our
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/Particle_Trajectories.ipynb
@@ -279,9 +279,9 @@
    "source": [
     "fig = plt.figure(figsize=(8.0, 8.0))\n",
     "ax = fig.add_subplot(111, projection='3d')\n",
-    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
-    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
-    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_y\"][100], trajs[\"particle_position_z\"][100])\n",
+    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_y\"][8], trajs[\"particle_position_z\"][8])\n",
+    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_y\"][25], trajs[\"particle_position_z\"][25])"
    ]
   },
   {

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -481,7 +481,9 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``.  See :ref:`halocatalog` for a demonstration
+of loading and working only with the catalog.
+Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -1,5 +1,12 @@
 .. _halo_mass_function:
 
+.. note::
+
+   This module has been deprecated as it no longer functions correctly and is
+   unmaintained.  The code has been moved to the `yt attic
+   <https://github.com/yt-project/yt_attic>`__. If you'd like to take it
+   over, please do!
+
 Halo Mass Function
 ==================
 

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/star_analysis.rst
--- a/doc/source/analyzing/analysis_modules/star_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/star_analysis.rst
@@ -1,5 +1,11 @@
 .. _star_analysis:
 
+.. note::
+
+   This module has been deprecated as it is unmaintained.  The code has been
+   moved to the `yt attic <https://github.com/yt-project/yt_attic>`__.
+   If you'd like to take it over, please do!
+
 Star Particle Analysis
 ======================
 .. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
@@ -209,8 +215,8 @@
 There are two ways to write out the data once the spectrum has been calculated.
 The command ``write_out`` outputs two columns of data:
 
-  1. Wavelength :math:`(\text{\AA})`
-  2. Flux (Luminosity per unit wavelength :math:`(\mathrm{\rm{L}_\odot} / \text{\AA})` , where
+  1. Wavelength (:math:`\text{Angstroms}`)
+  2. Flux (Luminosity per unit wavelength :math:`(\mathrm{\rm{L}_\odot} / \text{Angstrom})`, where
        :math:`\mathrm{\rm{L}_\odot} = 3.826 \cdot 10^{33}\, \mathrm{ergs / s}` ).
 
 and can be called simply, specifying the output file:
@@ -225,7 +231,7 @@
 distribution to. The default is 5200 Angstroms. This command outputs the data
 in two columns:
 
-  1. Wavelength :math:`(\text{\AA})`
+  1. Wavelength :math:`(\text{Angstroms})`
   2. Relative flux normalized to the flux at *flux_norm*.
 
 .. code-block:: python

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/sunrise_export.rst
--- a/doc/source/analyzing/analysis_modules/sunrise_export.rst
+++ b/doc/source/analyzing/analysis_modules/sunrise_export.rst
@@ -1,5 +1,11 @@
 .. _sunrise_export:
 
+.. note::
+
+   This module has been deprecated as it is unmaintained.  The code has been
+   moved to the `yt attic <https://github.com/yt-project/yt_attic>`__.
+   If you'd like to take it over, please do!
+
 Exporting to Sunrise
 ====================
 

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/two_point_functions.rst
--- a/doc/source/analyzing/analysis_modules/two_point_functions.rst
+++ b/doc/source/analyzing/analysis_modules/two_point_functions.rst
@@ -1,5 +1,11 @@
 .. _two_point_functions:
 
+.. note::
+
+   This module has been deprecated as it is unmaintained.  The code has been
+   moved to the `yt attic <https://github.com/yt-project/yt_attic>`__.
+   If you'd like to take it over, please do!
+
 Two Point Functions
 ===================
 .. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -1,3 +1,6 @@
 .. _xray_emission_fields:
 
+X-ray Emission Fields
+=====================
+
 .. notebook:: XrayEmissionFields.ipynb

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -12,8 +12,8 @@
 and execute the documentation interactively, you need to download the repository
 and start the IPython notebook.
 
-You will then need to navigate to :code:`$YT_HG/doc/source/units` (where $YT_HG
-is the location of a clone of the yt mercurial repository), and then start an
+You will then need to navigate to :code:`$YT_GIT/doc/source/units` (where $YT_GIT
+is the location of a clone of the yt git repository), and then start an
 IPython notebook server:
 
 .. code:: bash

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,9 +11,9 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys, os, glob, re
-from sphinx.search import WordCollector
-from docutils.nodes import comment, title, Text, SkipNode
+import sys
+import os
+import glob
 
 on_rtd = os.environ.get("READTHEDOCS", None) == "True"
 
@@ -30,9 +30,9 @@
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
-              'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
+              'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
               'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
-              'config_help']
+              'config_help', 'yt_showfields']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')
@@ -228,13 +228,6 @@
 # If true, show URL addresses after external links.
 #latex_show_urls = False
 
-# Additional stuff for the LaTeX preamble.
-latex_preamble = r"""
-\renewcommand{\AA}{\text{\r{A}}} % Allow \AA in math mode
-\usepackage[utf8]{inputenc}      % Allow unicode symbols in text
-\DeclareUnicodeCharacter {212B} {\AA}                  % Angstrom
-"""
-
 # Documents to append as an appendix to all manuals.
 #latex_appendices = []
 

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -71,6 +71,13 @@
 
 .. yt_cookbook:: multi_plot_3x2_FRB.py
 
+Time Series Movie
+~~~~~~~~~~~~~~~~~
+
+This shows how to use matplotlib's animation framework with yt plots.
+
+.. yt_cookbook:: matplotlib-animation.py
+
 .. _cookbook-offaxis_projection:
 
 Off-Axis Projection (an alternate method)

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/cookbook/matplotlib-animation.py
--- /dev/null
+++ b/doc/source/cookbook/matplotlib-animation.py
@@ -0,0 +1,22 @@
+import yt
+from matplotlib.animation import FuncAnimation
+from matplotlib import rc_context
+
+ts = yt.load('GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_*')
+
+plot = yt.SlicePlot(ts[0], 'z', 'density')
+plot.set_zlim('density', 8e-29, 3e-26)
+
+fig = plot.plots['density'].figure
+
+# animate must accept an integer frame number. We use the frame number
+# to identify which dataset in the time series we want to load
+def animate(i):
+    ds = ts[i]
+    plot._switch_ds(ds)
+
+animation = FuncAnimation(fig, animate, frames=len(ts))
+
+# Override matplotlib's defaults to get a nicer looking font
+with rc_context({'mathtext.fontset': 'stix'}):
+    animation.save('animation.mp4')

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -36,7 +36,7 @@
 
 
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
-BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
+BLACKLIST = ["opengl_ipython.py", "opengl_vr.py", "matplotlib-animation.py"]
 
 if sys.version_info >= (3,0,0):
     BLACKLIST.append("rockstar_nest.py")

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -19,9 +19,9 @@
 include a recipe in the cookbook section, or it could simply be adding a note
 in the relevant docs text somewhere.
 
-The documentation exists in the main mercurial code repository for yt in the
-``doc`` directory (i.e. ``$YT_HG/doc/source`` where ``$YT_HG`` is the path of
-the yt mercurial repository).  It is organized hierarchically into the main
+The documentation exists in the main code repository for yt in the
+``doc`` directory (i.e. ``$YT_GIT/doc/source`` where ``$YT_GIT`` is the path of
+the yt git repository).  It is organized hierarchically into the main
 categories of:
 
 * Visualizing
@@ -202,9 +202,7 @@
 :ref:`configuration-file`) to suppress large amounts of debug output from
 yt.
 
-To clean the docs build, use :code:`make clean`.  By default, :code:`make clean`
-will not delete the autogenerated API docs, so use :code:`make fullclean` to
-delete those as well.
+To clean the docs build, use :code:`make clean`.
 
 Building the Docs (Hybrid)
 ^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/developing/extensions.rst
--- a/doc/source/developing/extensions.rst
+++ b/doc/source/developing/extensions.rst
@@ -40,10 +40,10 @@
 
 A template for starting an extension module (or converting an existing set of
 code to an extension module) can be found at
-https://bitbucket.org/yt_analysis/yt_extension_template .
+https://github.com/yt-project/yt_extension_template .
 
 To get started, download a zipfile of the template (
-https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+https://github.com/yt-project/yt_extension_template/archive/master.zip ) and
 follow the directions in ``README.md`` to modify the metadata.
 
 Distributing Extensions

diff -r ff193b815c5f68b5c14f90ed8fdaa16c8b0be7f8 -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 doc/source/developing/releasing.rst
--- a/doc/source/developing/releasing.rst
+++ b/doc/source/developing/releasing.rst
@@ -12,9 +12,8 @@
   once a month. These releases should contain only fixes for bugs discovered in
   earlier releases and should not contain new features or API changes. Bugfix
   releases should increment the ``PATCH`` version number. Bugfix releases should
-  *not* be generated by merging from the ``yt`` branch, instead bugfix pull
-  requests should be manually backported using the PR backport script, described
-  below. Version ``3.2.2`` is a bugfix release.
+  *not* be generated by merging from the ``master`` branch, instead bugfix pull
+  requests should be manually backported. Version ``3.2.2`` is a bugfix release.
 
 * Minor releases
 
@@ -25,7 +24,7 @@
   backwards-incompatible changes and should not change APIs.  If an API change
   is deemed to be necessary, the old API should continue to function but might
   trigger deprecation warnings. Minor releases should happen by merging the
-  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``master`` branch into the ``stable`` branch. Minor releases should increment the
   ``MINOR`` version number and reset the ``PATCH`` version number to zero.
   Version ``3.3.0`` is a minor release.
 
@@ -36,7 +35,7 @@
   include arbitrary changes to the library. Major version releases should only
   happen after extensive discussion and vetting among the developer and user
   community. Like minor releases, a major release should happen by merging the
-  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``master`` branch into the ``stable`` branch. Major releases should increment the
   ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
   to zero. If it ever happens, version ``4.0.0`` will be a major release.
 
@@ -49,99 +48,25 @@
 As described above, bugfix releases are regularly scheduled updates for minor
 releases to ensure fixes for bugs make their way out to users in a timely
 manner. Since bugfix releases should not include new features, we do not issue
-bugfix releases by simply merging from the development ``yt`` branch into the
-``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
-manually cherry-pick bugfixes from the from ``yt`` branch onto the ``stable``
-branch.
-
-The backport script issues interactive prompts to backport individual pull
-requests to the ``stable`` branch in a temporary clone of the main yt mercurial
-repository on bitbucket. The script is written this way to to avoid editing
-history in a clone of the repository that a developer uses for day-to-day work
-and to avoid mixing work-in-progress changes with changes that have made their
-way to the "canonical" yt repository on bitbucket.
-
-Rather than automatically manipulating the temporary repository by scripting
-mercurial commands using ``python-hglib``, the script must be "operated" by a
-human who is ready to think carefully about what the script is telling them
-to do. Most operations will merely require copy/pasting a suggested mercurial
-command. However, some changes will require manual backporting.
-
-To run the backport script, first open two terminal windows. The first window
-will be used to run the backport script. The second terminal will be used to
-manipulate a temporary clone of the yt mercurial repository. In the first
-window, navigate to the ``scripts`` directory at the root of the yt repository
-and run the backport script,
-
-.. code-block:: bash
-
-   $ cd $YT_HG/scripts
-   $ python pr_backport.py
-
-You will then need to wait for about a minute (depending on the speed of your
-internet connection and bitbucket's servers) while the script makes a clone of
-the main yt repository and then gathers information about pull requests that
-have been merged since the last tagged release. Once this step finishes, you
-will be prompted to navigate to the temporary folder in a new separate terminal
-session. Do so, and then hit the enter key in the original terminal session.
-
-For each pull request in the set of pull requests that were merged since the
-last tagged release that were pointed at the "main" line of development
-(e.g. not the ``experimental`` bookmark), you will be prompted by the script
-with the PR number, title, description, and a suggested mercurial
-command to use to backport the pull request. If the pull request consists of a
-single changeset, you will be prompted to use ``hg graft``. If it contains more
-than one changeset, you will be prompted to use ``hg rebase``. Note that
-``rebase`` is an optional extension for mercurial that is not turned on by
-default. To enable it, add a section like the following in your ``.hgrc`` file:
-
-.. code-block:: none
-
-   [extensions]
-   rebase=
-
-Since ``rebase`` is bundled with core mercurial, you do not need to specify a
-path to the rebase extension, just say ``rebase=`` and mercurial will find the
-version of ``rebase`` bundled with mercurial. Note also that mercurial does not
-automatically update to the tip of the rebased head after executing ``hg
-rebase`` so you will need to manually issue ``hg update stable`` to move your
-working directory to the new head of the stable branch. The backport script
-should prompt you with a suggestion to update as well.
+bugfix releases by simply merging from the development ``master`` branch into
+the ``stable`` branch.  Instead, we manually cherry-pick bugfixes from the from
+``master`` branch onto the ``stable`` branch.
 
 If the pull request contains merge commits, you must take care to *not* backport
-commits that merge with the main line of development on the ``yt`` branch. Doing
+commits that merge with the main line of development on the ``master`` branch. Doing
 so may bring unrelated changes, including new features, into a bugfix
-release. If the pull request you'd like to backport contains merge commits, the
-backport script should warn you to be extra careful.
+release.
 
-Once you've finished backporting, the script will let you know that you are done
-and warn you to push your work. The temporary repository you have been working
-with will be deleted as soon as the script exits, so take care to push your work
-on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
-fork, you will be able to issue a pull request containing the backported fixes
-just like any other yt pull request.
+Once you've finished backporting, push your work to GitHub. Once you've pushed to
+your fork, you will be able to issue a pull request containing the backported
+fixes just like any other yt pull request.
 
 Doing a Minor or Major Release
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This is much simpler than a bugfix release.  All that needs to happen is that the
-``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
-happen must be resolved, almost certainly in favor of the yt branch. This can
-happen either using a merge tool like ``vimdiff`` and ``kdiff3`` or by telling
-mercurial to write merge markers. If you prefer merge markers, the following
-configuration options should be turned on in your ``hgrc`` to get more detail
-during the merge:
-
-.. code-block:: none
-
-   [ui]
-   merge = internal:merge3
-   mergemarkers = detailed
-
-The first option tells mercurial to write merge markers that show the state of
-the conflicted region of the code on both sides of the merge as well as the
-"base" most recent common ancestor changeset. The second option tells mercurial
-to add extra information about the code near the merge markers.
+``master`` branch gets merged into the ``stable`` branch, and any conflicts
+that arise must be resolved.
 
 
 Incrementing Version Numbers and Tagging a Release
@@ -174,35 +99,42 @@
 
 .. code-block:: bash
 
-   hg tag <tag-name>
+   git tag <tag-name>
 
 Where ``<tag-name>`` follows the project's naming scheme for tags
-(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
-release to pypi.
+(e.g. ``yt-3.2.1``). Once you are done, you will need to push the
+tag to GitHub::
+
+  git push origin --tags
 
-If you are doing a minor or major version number release, you will also need to
-update back to the development branch and update the development version numbers
-in the same files.
+This assumes that you have configured the remote ``origin`` to point at the main
+yt git repository. If you are doing a minor or major version number release, you
+will also need to update back to the development branch and update the
+development version numbers in the same files.
 
 
 Uploading to PyPI
 ~~~~~~~~~~~~~~~~~
 
 To actually upload the release to the Python Package Index, you just need to
-issue the following command:
+issue the following commands:
 
 .. code-block:: bash
 
-   python setup.py sdist upload -r https://pypi.python.org/pypi
+   python setup.py sdist
+   twine upload dist/*
 
 You will be prompted for your PyPI credentials and then the package should
 upload. Note that for this to complete successfully, you will need an account on
 PyPI and that account will need to be registered as an "owner" of the yt
-package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
-Goldbaum.
+package. Right now there are five owners: Matt Turk, Britton Smith, Nathan
+Goldbaum, John ZuHone, and Kacper Kowalik. In addition, you should attempt to
+upload the yt package along with compiled binary wheel packages for various
+platforms that we support.  You should contact John ZuHone about uploading
+binary wheels to PyPI for Windows and OS X users, Kacper Kowalik for Linux
+wheels, and contact Nathan Goldbaum about getting the Anaconda packages updated.
+
 
 After the release is uploaded to PyPI, you should send out an announcement
 e-mail to the yt mailing lists as well as other possibly interested mailing
-lists for all but bugfix releases. In addition, you should contact John ZuHone
-about uploading binary wheels to PyPI for Windows and OS X users and contact
-Nathan Goldbaum about getting the Anaconda packages updated.
+lists for all but bugfix releases.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/c8a22fa9f45d/
Changeset:   c8a22fa9f45d
User:        jzuhone
Date:        2017-05-05 15:23:48+00:00
Summary:     Add fnv_hash.c to gitignore
Affected #:  1 file

diff -r 5e3a51792b499e0fa61cabeb24b7eb4ed60580b0 -r c8a22fa9f45d29748d5b06a00f694f16d5e1d495 .gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
+yt/utilities/lib/fnv_hash.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c


https://bitbucket.org/yt_analysis/yt/commits/40e76bec05d5/
Changeset:   40e76bec05d5
User:        jzuhone
Date:        2017-05-05 15:25:54+00:00
Summary:     Catch where magnetic_unit has been set to None
Affected #:  1 file

diff -r c8a22fa9f45d29748d5b06a00f694f16d5e1d495 -r 40e76bec05d58389f39c481b066bdb5b9de10b07 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -922,7 +922,8 @@
 
     def _assign_unit_system(self, unit_system):
         current_mks_unit = None
-        if hasattr(self, 'magnetic_unit'):
+        magnetic_unit = getattr(self, 'magnetic_unit', None)
+        if magnetic_unit is not None:
             # if the magnetic unit is in T, we need to create the code unit
             # system as an MKS-like system
             if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
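
The fix above works because ``hasattr`` cannot distinguish "attribute missing"
from "attribute set to None". A tiny stand-in illustration (the class below is
hypothetical, not yt's ``Dataset``):

    class FakeDataset:
        magnetic_unit = None  # set, but deliberately empty

    ds = FakeDataset()
    # hasattr is True even though the value is None, so the old check
    # would wrongly enter the magnetic-unit branch
    print(hasattr(ds, 'magnetic_unit'))        # True
    print(getattr(ds, 'magnetic_unit', None))  # None, so the branch is skipped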


https://bitbucket.org/yt_analysis/yt/commits/00e5316a94b5/
Changeset:   00e5316a94b5
User:        jzuhone
Date:        2017-05-06 16:54:26+00:00
Summary:     This line can be less scary-sounding now
Affected #:  1 file

diff -r 40e76bec05d58389f39c481b066bdb5b9de10b07 -r 00e5316a94b5ce5c4b37c129ef1ac431e6e0e519 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1041,9 +1041,8 @@
         if len(self.units_override) == 0:
             return
         mylog.warning(
-            "Overriding code units. This is an experimental and potentially "
-            "dangerous option that may yield inconsistent results, and must be "
-            "used very carefully, and only if you know what you want from it.")
+            "Overriding code units: Use this option only if you know that the "
+            "dataset doesn't define the units correctly or at all.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("velocity","cm/s"), ("magnetic","gauss"),
                           ("temperature","K")]:


https://bitbucket.org/yt_analysis/yt/commits/da759775b6ef/
Changeset:   da759775b6ef
User:        jzuhone
Date:        2017-05-06 19:50:04+00:00
Summary:     We should just set cgs units here and let units_override handle the rest
Affected #:  1 file

diff -r 00e5316a94b5ce5c4b37c129ef1ac431e6e0e519 -r da759775b6efd21708ab036cacbeac8b2077e226 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -32,6 +32,8 @@
     dimensionless as sympy_one
 from yt.units.unit_object import \
     Unit
+from yt.units.unit_systems import \
+    unit_system_registry
 from yt.utilities.exceptions import \
     YTGDFUnknownGeometry
 from yt.utilities.lib.misc_utilities import \
@@ -222,6 +224,12 @@
                 # comparisons are insufficient
                 unit = Unit(unit, registry=self.unit_registry)
                 if unit_name.endswith('_unit') and unit.dimensions is sympy_one:
+                    # Catch code units and if they are dimensionless,
+                    # assign CGS units. setdefaultattr will catch code units
+                    # which have already been set via units_override. 
+                    un = unit_name[:-5]
+                    un = un.replace('magnetic', 'magnetic_field', 1)
+                    unit = unit_system_registry["cgs"][un]
                     setdefaultattr(self, unit_name, self.quan(value, unit))
                 setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:
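
For context on why ``setdefaultattr`` "catches" values already set via
``units_override``: it only assigns when the attribute is absent. A sketch of
those semantics (a hypothetical mini version, not necessarily the exact yt
source):

    def setdefaultattr(obj, name, value):
        # assign only if the attribute does not exist yet, so units
        # already installed via units_override are left untouched
        if not hasattr(obj, name):
            setattr(obj, name, value)
        return getattr(obj, name)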


https://bitbucket.org/yt_analysis/yt/commits/8acbdba5a7b5/
Changeset:   8acbdba5a7b5
User:        jzuhone
Date:        2017-05-07 17:10:11+00:00
Summary:     Fix docs for in_base("code")
Affected #:  1 file

diff -r da759775b6efd21708ab036cacbeac8b2077e226 -r 8acbdba5a7b5e7c70372bfb195a185f52b2c4550 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -161,7 +161,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+    "`in_base` also takes the `\"code\"` argument to convert the `YTArray` into the base units of the dataset:"
    ]
   },
   {
@@ -170,7 +170,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+    "print (dd['pressure'].in_base(\"code\")) # The IsolatedGalaxy dataset from above"
    ]
   },
   {
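
A short usage sketch of the corrected call, assuming the IsolatedGalaxy sample
dataset referenced in the notebook is available locally:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    dd = ds.all_data()
    # convert to the dataset's code-unit system instead of cgs/mks
    print(dd['pressure'].in_base("code"))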


https://bitbucket.org/yt_analysis/yt/commits/4e4f5902c94e/
Changeset:   4e4f5902c94e
User:        jzuhone
Date:        2017-05-08 12:44:32+00:00
Summary:     This should work like it does in the Athena frontend
Affected #:  1 file

diff -r 8acbdba5a7b5e7c70372bfb195a185f52b2c4550 -r 4e4f5902c94e5f8a77f434c2be2597b9b68aa33a yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -278,12 +278,8 @@
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
-    def set_code_units(self):
-        super(AthenaPPDataset, self).set_code_units()
-        mag_unit = getattr(self, "magnetic_unit", None)
-        if mag_unit is None:
-            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                     (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
         temp_unit = getattr(self, "temperature_unit", None)
         if temp_unit is None:
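
A quick dimensional check of the Gaussian-units identity used above,
B = sqrt(4*pi*M / (T**2 * L)), as a sketch assuming yt's unit symbols:

    import numpy as np
    from yt.units import g, cm, s

    b_unit = np.sqrt(4*np.pi * (1.0*g) / ((1.0*s)**2 * (1.0*cm)))
    # sqrt(g)/(sqrt(cm)*s) carries the dimensions of gauss
    print(b_unit.in_units("gauss"))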


https://bitbucket.org/yt_analysis/yt/commits/99730ac3fb04/
Changeset:   99730ac3fb04
User:        jzuhone
Date:        2017-05-08 13:16:14+00:00
Summary:     This line is redundant because we set it above
Affected #:  1 file

diff -r 4e4f5902c94e5f8a77f434c2be2597b9b68aa33a -r 99730ac3fb04c29171d0c1be9aa2b4a75ac6bae4 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -281,9 +281,6 @@
         self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                      (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
-        temp_unit = getattr(self, "temperature_unit", None)
-        if temp_unit is None:
-            self.temperature_unit = self.quan(1.0, "K")
         self.velocity_unit = self.length_unit / self.time_unit
 
     def _parse_parameter_file(self):


https://bitbucket.org/yt_analysis/yt/commits/09283738ac20/
Changeset:   09283738ac20
User:        jzuhone
Date:        2017-05-08 14:10:56+00:00
Summary:     Bump Athena++ answers
Affected #:  1 file

diff -r 99730ac3fb04c29171d0c1be9aa2b4a75ac6bae4 -r 09283738ac20284d49a99d6a511aca4e6b6c8801 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -5,7 +5,7 @@
   local_athena_003:
     - yt/frontends/athena
 
-  local_athena_pp_000:
+  local_athena_pp_001:
     - yt/frontends/athena_pp
 
   local_chombo_002:


https://bitbucket.org/yt_analysis/yt/commits/5e3f426472bb/
Changeset:   5e3f426472bb
User:        jzuhone
Date:        2017-05-08 14:32:10+00:00
Summary:     Rename this function and add a docstring
Affected #:  3 files

diff -r 09283738ac20284d49a99d6a511aca4e6b6c8801 -r 5e3f426472bb5c8d46055c4ec90b12861ad42412 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -19,7 +19,7 @@
 cimport cython
 from cython cimport floating
 from libc.stdlib cimport malloc, free
-from yt.utilities.lib.fnv_hash cimport _fnv_hash as fnv_hash
+from yt.utilities.lib.fnv_hash cimport c_fnv_hash as fnv_hash
 from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin, imin, imax
 from .oct_container cimport OctreeContainer, Oct
 cimport oct_visitors

diff -r 09283738ac20284d49a99d6a511aca4e6b6c8801 -r 5e3f426472bb5c8d46055c4ec90b12861ad42412 yt/utilities/lib/fnv_hash.pxd
--- a/yt/utilities/lib/fnv_hash.pxd
+++ b/yt/utilities/lib/fnv_hash.pxd
@@ -18,4 +18,4 @@
 import numpy as np
 cimport numpy as np
 
-cdef np.int64_t _fnv_hash(unsigned char[:] octets)
+cdef np.int64_t c_fnv_hash(unsigned char[:] octets)

diff -r 09283738ac20284d49a99d6a511aca4e6b6c8801 -r 5e3f426472bb5c8d46055c4ec90b12861ad42412 yt/utilities/lib/fnv_hash.pyx
--- a/yt/utilities/lib/fnv_hash.pyx
+++ b/yt/utilities/lib/fnv_hash.pyx
@@ -15,7 +15,7 @@
 import numpy as np
 cimport numpy as np
 
-cdef np.int64_t _fnv_hash(unsigned char[:] octets):
+cdef np.int64_t c_fnv_hash(unsigned char[:] octets):
     # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
     # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
     cdef np.int64_t hash_val = 2166136261
@@ -26,4 +26,14 @@
     return hash_val
 
 def fnv_hash(octets):
-    return _fnv_hash(octets)
\ No newline at end of file
+    """
+    
+    Create an FNV hash from a bytestring.
+    Info: http://www.isthe.com/chongo/tech/comp/fnv/index.html
+    
+    Parameters
+    ----------
+    octets : bytestring
+        The string of bytes to generate a hash from. 
+    """
+    return c_fnv_hash(octets)
\ No newline at end of file

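For readers without the compiled extension: the loop above is 32-bit FNV-1 (offset basis 2166136261, prime 16777619), but accumulated in a signed 64-bit integer rather than being masked back to 32 bits each round. A pure-Python sketch that matches it for ASCII input (the explicit wraparound emulates np.int64 overflow; the function name is mine):

    def fnv_hash_py(octets):
        # stand-in for yt.utilities.lib.fnv_hash.fnv_hash
        hash_val = 2166136261
        for octet in octets:
            hash_val = hash_val ^ octet
            hash_val = hash_val * 16777619
            # wrap to a signed 64-bit integer, as np.int64 arithmetic does
            hash_val = (hash_val + 2**63) % 2**64 - 2**63
        return hash_val

    print(fnv_hash_py(bytearray(b"code_length")))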

https://bitbucket.org/yt_analysis/yt/commits/84066fbb6b5b/
Changeset:   84066fbb6b5b
User:        jzuhone
Date:        2017-05-08 14:35:40+00:00
Summary:     Docstring here for unit_system_id
Affected #:  1 file

diff -r 5e3f426472bb5c8d46055c4ec90b12861ad42412 -r 84066fbb6b5b257b2c9c539b7469d89b9b07d173 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -57,6 +57,11 @@
     _unit_system_id = None
     @property
     def unit_system_id(self):
+        """
+        This is a unique identifier for the unit registry created 
+        from an FNV hash. It is needed to register a dataset's code
+        unit system in the unit system registry.
+        """
         if self._unit_system_id is None:
             hash_data = bytearray()
             for k, v in self.lut.items():

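Putting the pieces together: the identifier is just the FNV hash of every (symbol, value) pair in the registry's lookup table, so two registries get the same id exactly when their LUTs agree. A minimal sketch with a toy LUT (the dict contents here are invented; the real lut maps unit symbols to tuples of base value, dimensions, and so on):

    from yt.utilities.lib.fnv_hash import fnv_hash

    lut = {"code_length": (3.0857e21, "(length)"),
           "code_mass": (1.989e33, "(mass)")}
    hash_data = bytearray()
    for k, v in lut.items():
        hash_data.extend(k.encode('ascii'))
        hash_data.extend(repr(v).encode('ascii'))
    print("code_%d" % fnv_hash(hash_data))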

https://bitbucket.org/yt_analysis/yt/commits/1619d16d53f5/
Changeset:   1619d16d53f5
User:        jzuhone
Date:        2017-05-08 15:22:13+00:00
Summary:     Add a test for code system uniqueness
Affected #:  1 file

diff -r 84066fbb6b5b257b2c9c539b7469d89b9b07d173 -r 1619d16d53f59c0cd18f8ad9b1d3e1cf3e16599c yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -15,7 +15,8 @@
 from yt.units.unit_systems import UnitSystem
 from yt.units import dimensions
 from yt.convenience import load
-from yt.testing import assert_almost_equal, assert_allclose, requires_file
+from yt.testing import assert_almost_equal, assert_allclose, requires_file, \
+    fake_random_ds
 from yt.config import ytcfg
 
 def test_unit_systems():
@@ -133,3 +134,14 @@
             assert_allclose(magx.value, magnetic_field_x.value)
             assert_allclose(magnetic_field_x.to_equivalent('G', 'CGS').value,
                             magnetic_field_x.value*1e4)
+
+def test_code_unit_system_uniqueness():
+    ds1 = fake_random_ds(64)
+    ds2 = fake_random_ds(64, length_unit=2.0)
+    ds3 = fake_random_ds(64)
+
+    assert ds1.unit_registry.unit_system_id != ds2.unit_registry.unit_system_id
+    assert ds1.unit_registry.unit_system_id == ds3.unit_registry.unit_system_id
+
+    assert ds1.unit_registry.unit_system_id in unit_system_registry.keys()
+    assert ds2.unit_registry.unit_system_id in unit_system_registry.keys()
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/d4f3c9c958e4/
Changeset:   d4f3c9c958e4
User:        ngoldbaum
Date:        2017-05-08 19:49:19+00:00
Summary:     Merge pull request #1353 from jzuhone/code_units

Properly create code unit systems
Affected #:  15 files

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc .gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
+yt/utilities/lib/fnv_hash.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,6 +37,7 @@
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
+yt/utilities/lib/fnv_hash.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -161,7 +161,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+    "`in_base` also takes the `\"code\"` argument to convert the `YTArray` into the base units of the dataset:"
    ]
   },
   {
@@ -170,7 +170,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+    "print (dd['pressure'].in_base(\"code\")) # The IsolatedGalaxy dataset from above"
    ]
   },
   {

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc setup.py
--- a/setup.py
+++ b/setup.py
@@ -110,6 +110,10 @@
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.fnv_hash",
+              ["yt/utilities/lib/fnv_hash.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=std_libs),
     Extension("yt.utilities.lib.geometry_utils",
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -5,7 +5,7 @@
   local_athena_003:
     - yt/frontends/athena
 
-  local_athena_pp_000:
+  local_athena_pp_001:
     - yt/frontends/athena_pp
 
   local_chombo_002:

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -301,10 +301,10 @@
         self.no_cgs_equiv_length = False
 
         self._create_unit_registry()
-        self._assign_unit_system(unit_system)
 
         self._parse_parameter_file()
         self.set_units()
+        self._assign_unit_system(unit_system)
         self._setup_coordinate_handler()
 
         # Because we need an instantiated class to check the ds's existence in
@@ -921,9 +921,24 @@
         return self.refine_by**(l1-l0)
 
     def _assign_unit_system(self, unit_system):
-        create_code_unit_system(self)
+        current_mks_unit = None
+        magnetic_unit = getattr(self, 'magnetic_unit', None)
+        if magnetic_unit is not None:
+            # if the magnetic unit is in T, we need to create the code unit
+            # system as an MKS-like system
+            if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
+                if unit_system == "code":
+                    current_mks_unit = 'A'
+                elif unit_system == 'mks':
+                    pass
+                else:
+                    self.magnetic_unit = \
+                        self.magnetic_unit.to_equivalent('gauss', 'CGS')
+            self.unit_registry.modify("code_magnetic", self.magnetic_unit)
+        create_code_unit_system(self.unit_registry, 
+                                current_mks_unit=current_mks_unit)
         if unit_system == "code":
-            unit_system = str(self)
+            unit_system = self.unit_registry.unit_system_id
         else:
             unit_system = str(unit_system).lower()
         self.unit_system = unit_system_registry[unit_system]
@@ -1004,21 +1019,6 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        if hasattr(self, 'magnetic_unit'):
-            # if the magnetic unit is in T, we need to recreate the code unit
-            # system as an MKS-like system
-            if current_mks in self.magnetic_unit.units.dimensions.free_symbols:
-
-                if self.unit_system == unit_system_registry[str(self)]:
-                    unit_system_registry.pop(str(self))
-                    create_code_unit_system(self, current_mks_unit='A')
-                    self.unit_system = unit_system_registry[str(self)]
-                elif str(self.unit_system) == 'mks':
-                    pass
-                else:
-                    self.magnetic_unit = \
-                        self.magnetic_unit.to_equivalent('gauss', 'CGS')
-            self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         vel_unit = getattr(
             self, "velocity_unit", self.length_unit / self.time_unit)
         pressure_unit = getattr(
@@ -1041,9 +1041,8 @@
         if len(self.units_override) == 0:
             return
         mylog.warning(
-            "Overriding code units. This is an experimental and potentially "
-            "dangerous option that may yield inconsistent results, and must be "
-            "used very carefully, and only if you know what you want from it.")
+            "Overriding code units: Use this option only if you know that the "
+            "dataset doesn't define the units correctly or at all.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("velocity","cm/s"), ("magnetic","gauss"),
                           ("temperature","K")]:

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -278,16 +278,9 @@
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
 
-    def set_code_units(self):
-        super(AthenaPPDataset, self).set_code_units()
-        mag_unit = getattr(self, "magnetic_unit", None)
-        if mag_unit is None:
-            self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
-                                         (self.time_unit**2 * self.length_unit))
+        self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
+                                     (self.time_unit**2 * self.length_unit))
         self.magnetic_unit.convert_to_units("gauss")
-        temp_unit = getattr(self, "temperature_unit", None)
-        if temp_unit is None:
-            self.temperature_unit = self.quan(1.0, "K")
         self.velocity_unit = self.length_unit / self.time_unit
 
     def _parse_parameter_file(self):

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -32,6 +32,8 @@
     dimensionless as sympy_one
 from yt.units.unit_object import \
     Unit
+from yt.units.unit_systems import \
+    unit_system_registry
 from yt.utilities.exceptions import \
     YTGDFUnknownGeometry
 from yt.utilities.lib.misc_utilities import \
@@ -222,9 +224,12 @@
                 # comparisons are insufficient
                 unit = Unit(unit, registry=self.unit_registry)
                 if unit_name.endswith('_unit') and unit.dimensions is sympy_one:
+                    # Catch code units and if they are dimensionless,
+                    # assign CGS units. setdefaultattr will catch code units
+                    # which have already been set via units_override. 
                     un = unit_name[:-5]
                     un = un.replace('magnetic', 'magnetic_field', 1)
-                    unit = self.unit_system[un]
+                    unit = unit_system_registry["cgs"][un]
                     setdefaultattr(self, unit_name, self.quan(value, unit))
                 setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -19,6 +19,7 @@
 cimport cython
 from cython cimport floating
 from libc.stdlib cimport malloc, free
+from yt.utilities.lib.fnv_hash cimport c_fnv_hash as fnv_hash
 from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin, imin, imax
 from .oct_container cimport OctreeContainer, Oct
 cimport oct_visitors
@@ -45,16 +46,6 @@
 cdef np.float64_t grid_eps = np.finfo(np.float64).eps
 grid_eps = 0.0
 
-cdef np.int64_t fnv_hash(unsigned char[:] octets):
-    # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
-    # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
-    cdef np.int64_t hash_val = 2166136261
-    cdef char octet
-    for octet in octets:
-        hash_val = hash_val ^ octet
-        hash_val = hash_val * 16777619
-    return hash_val
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/units/tests/test_unit_systems.py
--- a/yt/units/tests/test_unit_systems.py
+++ b/yt/units/tests/test_unit_systems.py
@@ -15,7 +15,8 @@
 from yt.units.unit_systems import UnitSystem
 from yt.units import dimensions
 from yt.convenience import load
-from yt.testing import assert_almost_equal, assert_allclose, requires_file
+from yt.testing import assert_almost_equal, assert_allclose, requires_file, \
+    fake_random_ds
 from yt.config import ytcfg
 
 def test_unit_systems():
@@ -69,11 +70,7 @@
         ds = load(gslr, unit_system=us)
         dd = ds.sphere("c", (15.,"kpc"))
         for field in test_fields:
-            if us == "code":
-                # For this dataset code units are cgs
-                v1 = dd_cgs[field]
-            else:
-                v1 = dd_cgs[field].in_base(us)
+            v1 = dd_cgs[field].in_base(us)
             v2 = dd[field]
             assert_almost_equal(v1.v, v2.v)
             assert str(v2.units) == test_units[us][field]
@@ -88,7 +85,7 @@
 
     for us in test_units:
         ds = load(etc, unit_system=us)
-        dd = ds.sphere("max", (500.,"kpc"))
+        dd = ds.sphere("max", (500., "kpc"))
         for field in test_fields:
             if us == "code":
                 v1 = dd_cgs[field].in_units(test_units["code"][field])
@@ -137,3 +134,14 @@
             assert_allclose(magx.value, magnetic_field_x.value)
             assert_allclose(magnetic_field_x.to_equivalent('G', 'CGS').value,
                             magnetic_field_x.value*1e4)
+
+def test_code_unit_system_uniqueness():
+    ds1 = fake_random_ds(64)
+    ds2 = fake_random_ds(64, length_unit=2.0)
+    ds3 = fake_random_ds(64)
+
+    assert ds1.unit_registry.unit_system_id != ds2.unit_registry.unit_system_id
+    assert ds1.unit_registry.unit_system_id == ds3.unit_registry.unit_system_id
+
+    assert ds1.unit_registry.unit_system_id in unit_system_registry.keys()
+    assert ds2.unit_registry.unit_system_id in unit_system_registry.keys()
\ No newline at end of file

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -486,10 +486,10 @@
                 raise YTUnitsNotReducible(self, "cgs")
             return yt_base_unit
         else:
-            if unit_system == "code":
-                raise RuntimeError(r'You must refer to a dataset instance to convert to a '
-                                   r'code unit system. Try again with unit_system=ds instead, '
-                                   r'where \'ds\' is your dataset.')
+            if hasattr(unit_system, "unit_registry"):
+                unit_system = unit_system.unit_registry.unit_system_id
+            elif unit_system == "code":
+                unit_system = self.registry.unit_system_id
             unit_system = unit_system_registry[str(unit_system)]
             units_string = _get_system_unit_string(self.dimensions, unit_system.base_units)
             u = Unit(units_string, registry=self.registry)

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -18,6 +18,7 @@
 from distutils.version import LooseVersion
 from yt.units.unit_lookup_table import \
     default_unit_symbol_lut
+from yt.utilities.lib.fnv_hash import fnv_hash
 from yt.extern import six
 from sympy import \
     sympify, \
@@ -53,6 +54,22 @@
     def __contains__(self, item):
         return item in self.lut
 
+    _unit_system_id = None
+    @property
+    def unit_system_id(self):
+        """
+        This is a unique identifier for the unit registry created 
+        from an FNV hash. It is needed to register a dataset's code
+        unit system in the unit system registry.
+        """
+        if self._unit_system_id is None:
+            hash_data = bytearray()
+            for k, v in self.lut.items():
+                hash_data.extend(k.encode('ascii'))
+                hash_data.extend(repr(v).encode('ascii'))
+            self._unit_system_id = "code_%d" % fnv_hash(hash_data)
+        return self._unit_system_id
+
     def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None):
         """
         Add a symbol to this registry.

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -40,8 +40,8 @@
     Parameters
     ----------
     name : string
-        The name of the unit system. Will be used as the key in the *unit_system_registry*
-        dict to reference the unit system by.
+        The name of the unit system. Will be used as the key in the 
+        *unit_system_registry* dict to reference the unit system by.
     length_unit : string
         The base length unit of this unit system.
     mass_unit : string
@@ -53,10 +53,11 @@
     angle_unit : string, optional
         The base angle unit of this unit system. Defaults to "rad".
     curent_mks_unit : string, optional
-        The base current unit of this unit system. Only used in MKS or MKS-based unit systems.
+        The base current unit of this unit system. Only used in MKS 
+        or MKS-based unit systems.
     registry : :class:`yt.units.unit_registry.UnitRegistry` object
-        The unit registry associated with this unit system. Only useful for defining unit
-        systems based on code units.
+        The unit registry associated with this unit system. Only 
+        useful for defining unit systems based on code units.
     """
     def __init__(self, name, length_unit, mass_unit, time_unit,
                  temperature_unit="K", angle_unit="rad", current_mks_unit=None,
@@ -108,10 +109,11 @@
                 repr += "  %s: %s\n" % (key, self.units_map[dim])
         return repr
 
-def create_code_unit_system(ds, current_mks_unit=None):
-    code_unit_system = UnitSystem(
-        str(ds), "code_length", "code_mass", "code_time", "code_temperature",
-        current_mks_unit=current_mks_unit, registry=ds.unit_registry)
+def create_code_unit_system(unit_registry, current_mks_unit=None):
+    code_unit_system = UnitSystem(unit_registry.unit_system_id, "code_length", 
+                                  "code_mass", "code_time", "code_temperature",
+                                  current_mks_unit=current_mks_unit, 
+                                  registry=unit_registry)
     code_unit_system["velocity"] = "code_velocity"
     if current_mks_unit:
         code_unit_system["magnetic_field_mks"] = "code_magnetic"
@@ -136,7 +138,8 @@
 mks_unit_system["magnetic_field_mks"] = "T"
 mks_unit_system["charge_mks"] = "C"
 
-imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", temperature_unit="R")
+imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", 
+                                  temperature_unit="R")
 imperial_unit_system["force"] = "lbf"
 imperial_unit_system["energy"] = "ft*lbf"
 imperial_unit_system["pressure"] = "lbf/ft**2"
@@ -147,9 +150,11 @@
 
 solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr")
 
-geometrized_unit_system = UnitSystem("geometrized", "l_geom", "m_geom", "t_geom")
+geometrized_unit_system = UnitSystem("geometrized", "l_geom", 
+                                     "m_geom", "t_geom")
 
-planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", temperature_unit="T_pl")
+planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", 
+                                temperature_unit="T_pl")
 planck_unit_system["energy"] = "E_pl"
 planck_unit_system["charge_cgs"] = "q_pl"
 

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/utilities/lib/fnv_hash.pxd
--- /dev/null
+++ b/yt/utilities/lib/fnv_hash.pxd
@@ -0,0 +1,21 @@
+"""
+Definitions for fnv_hash
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import numpy as np
+cimport numpy as np
+
+cdef np.int64_t c_fnv_hash(unsigned char[:] octets)

diff -r aeec51532b28be2d6daf695960751a2fe6f07931 -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc yt/utilities/lib/fnv_hash.pyx
--- /dev/null
+++ b/yt/utilities/lib/fnv_hash.pyx
@@ -0,0 +1,39 @@
+"""
+Fast hashing routines
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2017, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+
+cdef np.int64_t c_fnv_hash(unsigned char[:] octets):
+    # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
+    # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
+    cdef np.int64_t hash_val = 2166136261
+    cdef char octet
+    for octet in octets:
+        hash_val = hash_val ^ octet
+        hash_val = hash_val * 16777619
+    return hash_val
+
+def fnv_hash(octets):
+    """
+    
+    Create an FNV hash from a bytestring.
+    Info: http://www.isthe.com/chongo/tech/comp/fnv/index.html
+    
+    Parameters
+    ----------
+    octets : bytestring
+        The string of bytes to generate a hash from. 
+    """
+    return c_fnv_hash(octets)
\ No newline at end of file

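One detail worth pulling out of this merge: when the dataset's magnetic unit carries the MKS current dimension (tesla) and the requested unit system is neither "code" nor "mks", the unit is converted to gauss through the CGS equivalence rather than a plain unit conversion, since tesla and gauss have different dimensions in yt. A sketch (assuming yt's YTQuantity):

    from yt.units.yt_array import YTQuantity

    b_mks = YTQuantity(1.0, "T")
    # b_mks.in_units("gauss") would raise, because tesla carries the
    # current_mks dimension; the CGS equivalence handles the switch.
    b_cgs = b_mks.to_equivalent("gauss", "CGS")
    print(b_cgs)  # 10000.0 gauss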

https://bitbucket.org/yt_analysis/yt/commits/1189a5b50a7a/
Changeset:   1189a5b50a7a
User:        ngoldbaum
Date:        2017-05-03 21:09:44+00:00
Summary:     [doc] recipe demonstrating how to render two fields. Closes #675
Affected #:  2 files

diff -r 8f01bbe64d90b35ea7d65e2918791cdbec362ccb -r 1189a5b50a7a348308c1524330e78a359230cf74 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -275,6 +275,14 @@
 
 .. yt_cookbook:: opaque_rendering.py
 
+Volume Rendering Multiple Fields
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can render multiple fields by adding new ``VolumeSource`` objects to the
+scene for each field you want to render.
+
+.. yt_cookbook:: render_two_fields.py
+
 .. _cookbook-amrkdtree_downsampling:
 
 Downsampling Data for Volume Rendering

diff -r 8f01bbe64d90b35ea7d65e2918791cdbec362ccb -r 1189a5b50a7a348308c1524330e78a359230cf74 doc/source/cookbook/render_two_fields.py
--- /dev/null
+++ b/doc/source/cookbook/render_two_fields.py
@@ -0,0 +1,27 @@
+import yt
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
+
+filePath = "Sedov_3d/sedov_hdf5_chk_0003"
+ds = yt.load(filePath)
+ds.periodicity = (True, True, True)
+
+sc = Scene()
+
+# set up camera
+cam = sc.add_camera(ds, lens_type='perspective')
+cam.resolution = [400, 400]
+
+cam.position = ds.arr([1, 1, 1], 'cm')
+cam.switch_orientation()
+
+# add rendering of density field
+dens = VolumeSource(ds, field='dens')
+dens.use_ghost_zones = True
+sc.add_source(dens)
+sc.save('density.png', sigma_clip=6)
+
+# add rendering of x-velocity field
+vel = VolumeSource(ds, field='velx')
+vel.use_ghost_zones = True
+sc.add_source(vel)
+sc.save('density_any_velocity.png', sigma_clip=6)

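Note that each Scene.save call renders every source attached so far, so the second image above contains both the density and velocity renderings. To render the velocity field on its own you would build a separate scene (a sketch under the same assumptions as the recipe; `ds` is the loaded Sedov dataset):

    from yt.visualization.volume_rendering.api import Scene, VolumeSource

    sc2 = Scene()
    cam2 = sc2.add_camera(ds, lens_type='perspective')
    cam2.resolution = [400, 400]
    cam2.position = ds.arr([1, 1, 1], 'cm')
    cam2.switch_orientation()

    vel_only = VolumeSource(ds, field='velx')
    vel_only.use_ghost_zones = True
    sc2.add_source(vel_only)
    sc2.save('velocity_only.png', sigma_clip=6)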

https://bitbucket.org/yt_analysis/yt/commits/4963f760fae5/
Changeset:   4963f760fae5
User:        ngoldbaum
Date:        2017-05-08 19:58:33+00:00
Summary:     Merge pull request #1364 from ngoldbaum/render-two-fields

[doc] recipe demonstrating how to render two fields. Closes #675
Affected #:  2 files

diff -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc -r 4963f760fae518c6595a2073728598b87c58981c doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -275,6 +275,14 @@
 
 .. yt_cookbook:: opaque_rendering.py
 
+Volume Rendering Multiple Fields
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can render multiple fields by adding new ``VolumeSource`` objects to the
+scene for each field you want to render.
+
+.. yt_cookbook:: render_two_fields.py
+
 .. _cookbook-amrkdtree_downsampling:
 
 Downsampling Data for Volume Rendering

diff -r d4f3c9c958e4b566e983f8d04d29c977a93bf2cc -r 4963f760fae518c6595a2073728598b87c58981c doc/source/cookbook/render_two_fields.py
--- /dev/null
+++ b/doc/source/cookbook/render_two_fields.py
@@ -0,0 +1,27 @@
+import yt
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
+
+filePath = "Sedov_3d/sedov_hdf5_chk_0003"
+ds = yt.load(filePath)
+ds.periodicity = (True, True, True)
+
+sc = Scene()
+
+# set up camera
+cam = sc.add_camera(ds, lens_type='perspective')
+cam.resolution = [400, 400]
+
+cam.position = ds.arr([1, 1, 1], 'cm')
+cam.switch_orientation()
+
+# add rendering of density field
+dens = VolumeSource(ds, field='dens')
+dens.use_ghost_zones = True
+sc.add_source(dens)
+sc.save('density.png', sigma_clip=6)
+
+# add rendering of x-velocity field
+vel = VolumeSource(ds, field='velx')
+vel.use_ghost_zones = True
+sc.add_source(vel)
+sc.save('density_any_velocity.png', sigma_clip=6)


https://bitbucket.org/yt_analysis/yt/commits/bd367a01b8a4/
Changeset:   bd367a01b8a4
User:        ngoldbaum
Date:        2017-05-04 14:21:07+00:00
Summary:     use output_units instead of units when generating ghost zones

closes #1368
Affected #:  2 files

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r bd367a01b8a467065b0e3013bbacad0a4a22a135 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -274,7 +274,7 @@
         for field in fields:
             finfo = self.ds._get_field_info(field)
             new_fields[field] = self.ds.arr(
-                np.zeros(self.ActiveDimensions + 1), finfo.units)
+                np.zeros(self.ActiveDimensions + 1), finfo.output_units)
         if no_ghost:
             for field in fields:
                 # Ensure we have the native endianness in this array.  Avoid making

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r bd367a01b8a467065b0e3013bbacad0a4a22a135 yt/data_objects/tests/test_fluxes.py
--- a/yt/data_objects/tests/test_fluxes.py
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -101,3 +101,13 @@
 
         assert os.path.exists('my_galaxy_emis.obj')
         assert os.path.exists('my_galaxy_emis.mtl')
+
+@requires_file(ISOGAL)
+def test_correct_output_unit():
+    # see issue #1368
+    ds = load(ISOGAL)
+    x = y = z = .5
+    sp1 = ds.sphere((x,y,z), (300, 'kpc'))
+    Nmax = sp1.max('HI_Density')
+    sur = ds.surface(sp1,"HI_Density", .5*Nmax)
+    sur['x'][0]

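The distinction at play: a field's units attribute holds its native (often code) units, while output_units holds the units the field is actually returned in after aliasing. Allocating the ghost-zone buffer in the former while filling it with data in the latter is what broke surfaces on datasets whose code units differ from CGS. The two attributes can be inspected directly (a sketch; the field tuple assumes an Enzo dataset such as IsolatedGalaxy):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    finfo = ds._get_field_info(('enzo', 'Density'))
    print(finfo.units)         # native units, e.g. code_mass/code_length**3
    print(finfo.output_units)  # units data is returned in, e.g. g/cm**3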

https://bitbucket.org/yt_analysis/yt/commits/8e2a50964817/
Changeset:   8e2a50964817
User:        ngoldbaum
Date:        2017-05-08 19:58:53+00:00
Summary:     Merge pull request #1369 from ngoldbaum/surface-fix

Use output_units instead of units when generating ghost zones. closes #1368
Affected #:  2 files

diff -r 4963f760fae518c6595a2073728598b87c58981c -r 8e2a5096481710409ce4fbcf2c1899c719fc9430 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -274,7 +274,7 @@
         for field in fields:
             finfo = self.ds._get_field_info(field)
             new_fields[field] = self.ds.arr(
-                np.zeros(self.ActiveDimensions + 1), finfo.units)
+                np.zeros(self.ActiveDimensions + 1), finfo.output_units)
         if no_ghost:
             for field in fields:
                 # Ensure we have the native endianness in this array.  Avoid making

diff -r 4963f760fae518c6595a2073728598b87c58981c -r 8e2a5096481710409ce4fbcf2c1899c719fc9430 yt/data_objects/tests/test_fluxes.py
--- a/yt/data_objects/tests/test_fluxes.py
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -101,3 +101,13 @@
 
         assert os.path.exists('my_galaxy_emis.obj')
         assert os.path.exists('my_galaxy_emis.mtl')
+
+@requires_file(ISOGAL)
+def test_correct_output_unit():
+    # see issue #1368
+    ds = load(ISOGAL)
+    x = y = z = .5
+    sp1 = ds.sphere((x,y,z), (300, 'kpc'))
+    Nmax = sp1.max('HI_Density')
+    sur = ds.surface(sp1,"HI_Density", .5*Nmax)
+    sur['x'][0]


https://bitbucket.org/yt_analysis/yt/commits/0dcde2a9a2fa/
Changeset:   0dcde2a9a2fa
User:        ngoldbaum
Date:        2017-05-04 16:25:01+00:00
Summary:     use the original field's units when aliasing dimensionless fields

closes #1344
Affected #:  2 files

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 0dcde2a9a2fa1e077ffa9dd3720ed0b6adb845ac yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -21,6 +21,7 @@
 from yt.extern.six import string_types
 from yt.funcs import mylog, only_on_root
 from yt.units.unit_object import Unit
+from yt.units.dimensions import dimensionless
 from .derived_field import \
     DerivedField, \
     NullFunc, \
@@ -86,15 +87,15 @@
                 self[(ftype, f)].units = self["index", f].units
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
-        skip_output_units = ("code_length",)
+        skip_output_units = ("code_length")
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
             units = self.ds.field_units.get((ptype, f), units)
+            output_units = units
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(self.ds.unit_system[u.dimensions])
-            else:
-                output_units = units
+                if u.dimensions is not dimensionless:
+                    output_units = str(self.ds.unit_system[u.dimensions])
             if (ptype, f) not in self.field_list:
                 continue
             self.add_output_field((ptype, f), sampling_type="particle",
@@ -307,7 +308,10 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(self.ds.unit_system[u.dimensions])
+            if u.dimensions is not dimensionless:
+                units = str(self.ds.unit_system[u.dimensions])
+            else:
+                units = self[original_name].units
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 0dcde2a9a2fa1e077ffa9dd3720ed0b6adb845ac yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -13,12 +13,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
 import os
 import shutil
 import tempfile
 import unittest
 
-from yt.testing import assert_raises
+from yt.frontends.stream.data_structures import load_uniform_grid
+from yt.testing import \
+    assert_equal, \
+    assert_raises
 from yt.convenience import load
 from yt.utilities.exceptions import YTOutputNotIdentified
 
@@ -43,3 +47,15 @@
         assert_raises(YTOutputNotIdentified, load, "not_a_file")
         assert_raises(YTOutputNotIdentified, load, "empty_file")
         assert_raises(YTOutputNotIdentified, load, "empty_directory")
+
+def test_dimensionless_field_units():
+    Z = np.random.uniform(size=(32,32,32))
+    d = np.random.uniform(size=(32,32,32))
+
+    data = {"density": d, "metallicity": Z}
+
+    ds = load_uniform_grid(data, (32,32,32))
+
+    dd = ds.all_data()
+
+    assert_equal(Z.max(), dd["metallicity"].max())

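In isolation, the aliasing rule after this fix is: convert the alias to the unit system's base units only when the original field carries real dimensions; a dimensionless field keeps its own units string. A hypothetical helper mirroring the new branch (the name and signature are mine, not yt's):

    from yt.units.unit_object import Unit
    from yt.units.dimensions import dimensionless

    def alias_units(original_units, unit_system, registry=None):
        u = Unit(original_units, registry=registry)
        if u.dimensions is not dimensionless:
            # dimensional fields get the unit system's base units
            return str(unit_system[u.dimensions])
        # dimensionless fields (e.g. metallicity) keep their units
        return original_units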

https://bitbucket.org/yt_analysis/yt/commits/fb56ba9587fe/
Changeset:   fb56ba9587fe
User:        ngoldbaum
Date:        2017-05-08 19:59:09+00:00
Summary:     Merge pull request #1370 from ngoldbaum/metallicity-fix

Use the original field's units when aliasing dimensionless fields.
Affected #:  2 files

diff -r 8e2a5096481710409ce4fbcf2c1899c719fc9430 -r fb56ba9587feaba91561d6c9572a3cfd754eb325 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -21,6 +21,7 @@
 from yt.extern.six import string_types
 from yt.funcs import mylog, only_on_root
 from yt.units.unit_object import Unit
+from yt.units.dimensions import dimensionless
 from .derived_field import \
     DerivedField, \
     NullFunc, \
@@ -86,15 +87,15 @@
                 self[(ftype, f)].units = self["index", f].units
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
-        skip_output_units = ("code_length",)
+        skip_output_units = ("code_length")
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
             units = self.ds.field_units.get((ptype, f), units)
+            output_units = units
             if (f in aliases or ptype not in self.ds.particle_types_raw) and \
                 units not in skip_output_units:
                 u = Unit(units, registry = self.ds.unit_registry)
-                output_units = str(self.ds.unit_system[u.dimensions])
-            else:
-                output_units = units
+                if u.dimensions is not dimensionless:
+                    output_units = str(self.ds.unit_system[u.dimensions])
             if (ptype, f) not in self.field_list:
                 continue
             self.add_output_field((ptype, f), sampling_type="particle",
@@ -307,7 +308,10 @@
             # as well.
             u = Unit(self[original_name].units,
                       registry = self.ds.unit_registry)
-            units = str(self.ds.unit_system[u.dimensions])
+            if u.dimensions is not dimensionless:
+                units = str(self.ds.unit_system[u.dimensions])
+            else:
+                units = self[original_name].units
         self.field_aliases[alias_name] = original_name
         self.add_field(alias_name,
             function = TranslationFunc(original_name),

diff -r 8e2a5096481710409ce4fbcf2c1899c719fc9430 -r fb56ba9587feaba91561d6c9572a3cfd754eb325 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -13,12 +13,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
 import os
 import shutil
 import tempfile
 import unittest
 
-from yt.testing import assert_raises
+from yt.frontends.stream.data_structures import load_uniform_grid
+from yt.testing import \
+    assert_equal, \
+    assert_raises
 from yt.convenience import load
 from yt.utilities.exceptions import YTOutputNotIdentified
 
@@ -43,3 +47,15 @@
         assert_raises(YTOutputNotIdentified, load, "not_a_file")
         assert_raises(YTOutputNotIdentified, load, "empty_file")
         assert_raises(YTOutputNotIdentified, load, "empty_directory")
+
+def test_dimensionless_field_units():
+    Z = np.random.uniform(size=(32,32,32))
+    d = np.random.uniform(size=(32,32,32))
+
+    data = {"density": d, "metallicity": Z}
+
+    ds = load_uniform_grid(data, (32,32,32))
+
+    dd = ds.all_data()
+
+    assert_equal(Z.max(), dd["metallicity"].max())


https://bitbucket.org/yt_analysis/yt/commits/5da659cc6d40/
Changeset:   5da659cc6d40
User:        ngoldbaum
Date:        2017-05-05 14:34:16+00:00
Summary:     replace profile.variance with profile.standard_deviation

closes #1306
Affected #:  5 files

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 5da659cc6d4049114c55e11a37695dc8fd432624 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -93,10 +93,10 @@
 
 Profile objects can be created from any data object (see :ref:`data-objects`,
 specifically the section :ref:`available-objects` for more information) and are
-best thought of as distribution calculations.  They can either sum up or
-average one quantity with respect to one or more other quantities, and they do
-this over all the data contained in their source object.  When calculating average
-values, the variance will also be calculated.
+best thought of as distribution calculations.  They can either sum up or average
+one quantity with respect to one or more other quantities, and they do this over
+all the data contained in their source object.  When calculating average values,
+the standard deviation will also be calculated.
 
 To generate a profile, one need only specify the binning fields and the field
 to be profiled.  The binning fields are given together in a list.  The
@@ -131,16 +131,17 @@
 
    print(profile.used)
 
-If a weight field was given, the profile data will represent the weighted mean of
-a field.  In this case, the weighted variance will be calculated automatically and
-can be access via the ``profile.variance`` attribute.
+If a weight field was given, the profile data will represent the weighted mean
+of a field.  In this case, the weighted standard deviation will be calculated
+automatically and can be accessed via the ``profile.standard_deviation``
+attribute.
 
 .. code-block:: python
 
-   print(profile.variance["gas", "temperature"])
+   print(profile.standard_deviation["gas", "temperature"])
 
-A two-dimensional profile of the total gas mass in bins of density and temperature
-can be created as follows:
+A two-dimensional profile of the total gas mass in bins of density and
+temperature can be created as follows:
 
 .. code-block:: python
 

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 5da659cc6d4049114c55e11a37695dc8fd432624 doc/source/cookbook/profile_with_standard_deviation.py
--- /dev/null
+++ b/doc/source/cookbook/profile_with_standard_deviation.py
@@ -0,0 +1,34 @@
+import matplotlib.pyplot as plt
+import yt
+
+# Load the dataset.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create a sphere of radius 1 Mpc centered on the max density location.
+sp = ds.sphere("max", (1, "Mpc"))
+
+# Calculate and store the bulk velocity for the sphere.
+bulk_velocity = sp.quantities.bulk_velocity()
+sp.set_field_parameter('bulk_velocity', bulk_velocity)
+
+# Create a 1D profile object for profiles over radius
+# and add a velocity profile.
+prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
+                         units = {'radius': 'kpc'},
+                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
+                         weight_field='cell_mass')
+
+# Create arrays to plot.
+radius = prof.x
+mean = prof['gas', 'velocity_magnitude']
+std = prof.standard_deviation['gas', 'velocity_magnitude']
+
+# Plot the average velocity magnitude.
+plt.loglog(radius, mean, label='Mean')
+# Plot the standard deviation of the velocity magnitude.
+plt.loglog(radius, std, label='Standard Deviation')
+plt.xlabel('r [kpc]')
+plt.ylabel('v [cm/s]')
+plt.legend()
+
+plt.savefig('velocity_profiles.png')

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 5da659cc6d4049114c55e11a37695dc8fd432624 doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import matplotlib.pyplot as plt
-import yt
-
-# Load the dataset.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-
-# Create a sphere of radius 1 Mpc centered on the max density location.
-sp = ds.sphere("max", (1, "Mpc"))
-
-# Calculate and store the bulk velocity for the sphere.
-bulk_velocity = sp.quantities.bulk_velocity()
-sp.set_field_parameter('bulk_velocity', bulk_velocity)
-
-# Create a 1D profile object for profiles over radius
-# and add a velocity profile.
-prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
-                         units = {'radius': 'kpc'},
-                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
-                         weight_field='cell_mass')
-
-# Create arrays to plot.
-radius = prof.x.value
-mean = prof['gas', 'velocity_magnitude'].value
-variance = prof.variance['gas', 'velocity_magnitude'].value
-
-# Plot the average velocity magnitude.
-plt.loglog(radius, mean, label='Mean')
-# Plot the variance of the velocity magnitude.
-plt.loglog(radius, variance, label='Standard Deviation')
-plt.xlabel('r [kpc]')
-plt.ylabel('v [cm/s]')
-plt.legend()
-
-plt.savefig('velocity_profiles.png')

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 5da659cc6d4049114c55e11a37695dc8fd432624 doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -80,17 +80,17 @@
 
 .. yt_cookbook:: time_series_profiles.py
 
-.. _cookbook-profile-variance:
+.. _cookbook-profile-stddev:
 
-Profiles with Variance Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Profiles with Standard Deviation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-This shows how to plot the variance for a 1D profile.  In this example, we
-manually create a 1D profile object, which gives us access to the variance
-data.
-See :ref:`how-to-make-1d-profiles` for more information.
+This shows how to plot a 1D profile with error bars indicating the standard
+deviation of the field values in each profile bin.  In this example, we manually
+create a 1D profile object, which gives us access to the standard deviation
+data.  See :ref:`how-to-make-1d-profiles` for more information.
 
-.. yt_cookbook:: profile_with_variance.py
+.. yt_cookbook:: profile_with_standard_deviation.py
 
 Making Plots of Multiple Fields Simultaneously
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r 5da659cc6d4049114c55e11a37695dc8fd432624 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -91,7 +91,7 @@
         self.field_info = {}
         self.field_data = YTFieldData()
         if weight_field is not None:
-            self.variance = YTFieldData()
+            self.standard_deviation = YTFieldData()
             weight_field = self.data_source._determine_fields(weight_field)[0]
         self.weight_field = weight_field
         self.field_units = {}
@@ -153,13 +153,13 @@
 
         all_val = np.zeros_like(temp_storage.values)
         all_mean = np.zeros_like(temp_storage.mvalues)
-        all_var = np.zeros_like(temp_storage.qvalues)
+        all_std = np.zeros_like(temp_storage.qvalues)
         all_weight = np.zeros_like(temp_storage.weight_values)
         all_used = np.zeros_like(temp_storage.used, dtype="bool")
 
-        # Combine the weighted mean and variance from each processor.
-        # For two samples with total weight, mean, and variance 
-        # given by w, m, and s, their combined mean and variance are:
+        # Combine the weighted mean and standard_deviation from each processor.
+        # For two samples with total weight, mean, and standard_deviation 
+        # given by w, m, and s, their combined mean and standard_deviation are:
         # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
         # s12 = (m1 * (s1**2 + (m1 - m12)**2) + 
         #        m2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
@@ -180,8 +180,8 @@
                    all_store[p].weight_values)[all_store[p].used] / \
                    all_weight[all_store[p].used]
 
-                all_var[..., i][all_store[p].used] = \
-                  (old_weight * (all_var[..., i] +
+                all_std[..., i][all_store[p].used] = \
+                  (old_weight * (all_std[..., i] +
                                  (old_mean[..., i] - all_mean[..., i])**2) +
                    all_store[p].weight_values *
                    (all_store[p].qvalues[..., i] + 
@@ -189,7 +189,7 @@
                      all_mean[..., i])**2))[all_store[p].used] / \
                     all_weight[all_store[p].used]
 
-        all_var = np.sqrt(all_var)
+        all_std = np.sqrt(all_std)
         del all_store
         self.used = all_used
         blank = ~all_used
@@ -206,10 +206,10 @@
                 self.field_data[field] = \
                   array_like_field(self.data_source, 
                                    all_mean[...,i], field)
-                self.variance[field] = \
+                self.standard_deviation[field] = \
                   array_like_field(self.data_source,
-                                   all_var[...,i], field)
-                self.variance[field][blank] = 0.0
+                                   all_std[...,i], field)
+                self.standard_deviation[field][blank] = 0.0
             self.field_data[field][blank] = 0.0
             self.field_units[field] = self.field_data[field].units
             if isinstance(field, tuple):

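Two notes on the combination comment above: the code actually weights by w1 and w2 (the m1 and m2 coefficients in the comment's second formula are a typo), and the identity itself is easy to verify numerically. A small NumPy check (my own, not yt code):

    import numpy as np

    def weighted_stats(x, w):
        m = np.average(x, weights=w)
        s = np.sqrt(np.average((x - m)**2, weights=w))
        return w.sum(), m, s

    rng = np.random.RandomState(0)
    x1, wv1 = rng.rand(100), rng.rand(100)
    x2, wv2 = rng.rand(50), rng.rand(50)

    w1, m1, s1 = weighted_stats(x1, wv1)
    w2, m2, s2 = weighted_stats(x2, wv2)

    # combined mean and standard deviation of the two chunks
    m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
    s12 = np.sqrt((w1 * (s1**2 + (m1 - m12)**2) +
                   w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2))

    # must match the statistics of the pooled sample
    _, m_all, s_all = weighted_stats(np.concatenate([x1, x2]),
                                     np.concatenate([wv1, wv2]))
    assert np.allclose([m12, s12], [m_all, s_all])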

https://bitbucket.org/yt_analysis/yt/commits/7ce9d052498b/
Changeset:   7ce9d052498b
User:        ngoldbaum
Date:        2017-05-05 15:00:32+00:00
Summary:     add a deprecated variance property for backward compatibility
Affected #:  2 files

diff -r 5da659cc6d4049114c55e11a37695dc8fd432624 -r 7ce9d052498bb0dc8889f7306d57f8699e0e6ef2 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -246,9 +246,9 @@
         setattr(halo, storage, halo_store)
     halo_store.update(prof_store)
 
-    if hasattr(my_profile, "variance"):
-        variance_store = dict([(field, my_profile.variance[field]) \
-                           for field in my_profile.variance])
+    if my_profile.standard_deviation is not None:
+        variance_store = dict([(field, my_profile.standard_deviation[field]) \
+                               for field in my_profile.standard_deviation])
         variance_storage = "%s_variance" % storage
         if hasattr(halo, variance_storage):
             halo_variance_store = getattr(halo, variance_storage)

diff -r 5da659cc6d4049114c55e11a37695dc8fd432624 -r 7ce9d052498bb0dc8889f7306d57f8699e0e6ef2 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -21,7 +21,8 @@
 from yt.funcs import \
     get_output_filename, \
     ensure_list, \
-    iterable
+    iterable, \
+    issue_deprecation_warning
 from yt.units.yt_array import \
     array_like_field, \
     YTQuantity
@@ -93,10 +94,20 @@
         if weight_field is not None:
             self.standard_deviation = YTFieldData()
             weight_field = self.data_source._determine_fields(weight_field)[0]
+        else:
+            self.standard_deviation = None
         self.weight_field = weight_field
         self.field_units = {}
         ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
+    @property
+    def variance(self):
+        issue_deprecation_warning("""
+profile.variance incorrectly returns the profile standard deviation and has 
+been deprecated, use profile.standard_deviation instead."""
+        )
+        return self.standard_deviation
+
     def add_fields(self, fields):
         """Add fields to profile
 
@@ -157,9 +168,9 @@
         all_weight = np.zeros_like(temp_storage.weight_values)
         all_used = np.zeros_like(temp_storage.used, dtype="bool")
 
-        # Combine the weighted mean and standard_deviation from each processor.
-        # For two samples with total weight, mean, and standard_deviation 
-        # given by w, m, and s, their combined mean and standard_deviation are:
+        # Combine the weighted mean and standard deviation from each processor.
+        # For two samples with total weight, mean, and standard deviation 
+        # given by w, m, and s, their combined mean and standard deviation are:
         # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
         # s12 = (m1 * (s1**2 + (m1 - m12)**2) + 
         #        m2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)

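The shim above is the standard deprecated-property pattern: keep the old attribute name as a property that warns and forwards to the new one. A generic sketch outside yt (using the stdlib warnings module rather than yt's issue_deprecation_warning):

    import warnings

    class Profile(object):
        def __init__(self):
            self.standard_deviation = {}

        @property
        def variance(self):
            # the old name still works, but each access warns
            warnings.warn("Profile.variance is deprecated; use "
                          "Profile.standard_deviation instead.",
                          DeprecationWarning, stacklevel=2)
            return self.standard_deviation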

https://bitbucket.org/yt_analysis/yt/commits/d156390fc522/
Changeset:   d156390fc522
User:        ngoldbaum
Date:        2017-05-08 19:59:26+00:00
Summary:     Merge pull request #1372 from ngoldbaum/profile-stddev

Deprecate profile.variance in favor of profile.standard_deviation
Affected #:  6 files

diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -93,10 +93,10 @@
 
 Profile objects can be created from any data object (see :ref:`data-objects`,
 specifically the section :ref:`available-objects` for more information) and are
-best thought of as distribution calculations.  They can either sum up or
-average one quantity with respect to one or more other quantities, and they do
-this over all the data contained in their source object.  When calculating average
-values, the variance will also be calculated.
+best thought of as distribution calculations.  They can either sum up or average
+one quantity with respect to one or more other quantities, and they do this over
+all the data contained in their source object.  When calculating average values,
+the standard deviation will also be calculated.
 
 To generate a profile, one need only specify the binning fields and the field
 to be profiled.  The binning fields are given together in a list.  The
@@ -131,16 +131,17 @@
 
    print(profile.used)
 
-If a weight field was given, the profile data will represent the weighted mean of
-a field.  In this case, the weighted variance will be calculated automatically and
-can be access via the ``profile.variance`` attribute.
+If a weight field was given, the profile data will represent the weighted mean
+of a field.  In this case, the weighted standard deviation will be calculated
+automatically and can be accessed via the ``profile.standard_deviation``
+attribute.
 
 .. code-block:: python
 
-   print(profile.variance["gas", "temperature"])
+   print(profile.standard_deviation["gas", "temperature"])
 
-A two-dimensional profile of the total gas mass in bins of density and temperature
-can be created as follows:
+A two-dimensional profile of the total gas mass in bins of density and
+temperature can be created as follows:
 
 .. code-block:: python
 

diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e doc/source/cookbook/profile_with_standard_deviation.py
--- /dev/null
+++ b/doc/source/cookbook/profile_with_standard_deviation.py
@@ -0,0 +1,34 @@
+import matplotlib.pyplot as plt
+import yt
+
+# Load the dataset.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create a sphere of radius 1 Mpc centered on the max density location.
+sp = ds.sphere("max", (1, "Mpc"))
+
+# Calculate and store the bulk velocity for the sphere.
+bulk_velocity = sp.quantities.bulk_velocity()
+sp.set_field_parameter('bulk_velocity', bulk_velocity)
+
+# Create a 1D profile object for profiles over radius
+# and add a velocity profile.
+prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
+                         units = {'radius': 'kpc'},
+                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
+                         weight_field='cell_mass')
+
+# Create arrays to plot.
+radius = prof.x
+mean = prof['gas', 'velocity_magnitude']
+std = prof.standard_deviation['gas', 'velocity_magnitude']
+
+# Plot the average velocity magnitude.
+plt.loglog(radius, mean, label='Mean')
+# Plot the standard deviation of the velocity magnitude.
+plt.loglog(radius, std, label='Standard Deviation')
+plt.xlabel('r [kpc]')
+plt.ylabel('v [cm/s]')
+plt.legend()
+
+plt.savefig('velocity_profiles.png')

diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import matplotlib.pyplot as plt
-import yt
-
-# Load the dataset.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-
-# Create a sphere of radius 1 Mpc centered on the max density location.
-sp = ds.sphere("max", (1, "Mpc"))
-
-# Calculate and store the bulk velocity for the sphere.
-bulk_velocity = sp.quantities.bulk_velocity()
-sp.set_field_parameter('bulk_velocity', bulk_velocity)
-
-# Create a 1D profile object for profiles over radius
-# and add a velocity profile.
-prof = yt.create_profile(sp, 'radius', ('gas', 'velocity_magnitude'),
-                         units = {'radius': 'kpc'},
-                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
-                         weight_field='cell_mass')
-
-# Create arrays to plot.
-radius = prof.x.value
-mean = prof['gas', 'velocity_magnitude'].value
-variance = prof.variance['gas', 'velocity_magnitude'].value
-
-# Plot the average velocity magnitude.
-plt.loglog(radius, mean, label='Mean')
-# Plot the variance of the velocity magnitude.
-plt.loglog(radius, variance, label='Standard Deviation')
-plt.xlabel('r [kpc]')
-plt.ylabel('v [cm/s]')
-plt.legend()
-
-plt.savefig('velocity_profiles.png')

diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -80,17 +80,17 @@
 
 .. yt_cookbook:: time_series_profiles.py
 
-.. _cookbook-profile-variance:
+.. _cookbook-profile-stddev:
 
-Profiles with Variance Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Profiles with Standard Deviation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-This shows how to plot the variance for a 1D profile.  In this example, we
-manually create a 1D profile object, which gives us access to the variance
-data.
-See :ref:`how-to-make-1d-profiles` for more information.
+This shows how to plot a 1D profile with error bars indicating the standard
+deviation of the field values in each profile bin.  In this example, we manually
+create a 1D profile object, which gives us access to the standard deviation
+data.  See :ref:`how-to-make-1d-profiles` for more information.
 
-.. yt_cookbook:: profile_with_variance.py
+.. yt_cookbook:: profile_with_standard_deviation.py
 
 Making Plots of Multiple Fields Simultaneously
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

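The cookbook script referenced here draws the standard deviation as a second
curve; the "error bars" phrasing above can equally be realized with
matplotlib's errorbar. A self-contained sketch with stand-in arrays (the yt
profile objects are not recreated here):

    import numpy as np
    import matplotlib.pyplot as plt

    # Illustrative stand-ins for prof.x, prof[...], and
    # prof.standard_deviation[...] from the cookbook script.
    radius = np.logspace(-1, 3, 32)
    mean = 1.0e7 / np.sqrt(radius)
    std = 0.2 * mean

    plt.errorbar(radius, mean, yerr=std)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('r [kpc]')
    plt.ylabel('v [cm/s]')
    plt.savefig('velocity_profile_errorbars.png')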
diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -246,9 +246,9 @@
         setattr(halo, storage, halo_store)
     halo_store.update(prof_store)
 
-    if hasattr(my_profile, "variance"):
-        variance_store = dict([(field, my_profile.variance[field]) \
-                           for field in my_profile.variance])
+    if my_profile.standard_deviation is not None:
+        variance_store = dict([(field, my_profile.standard_deviation[field]) \
+                               for field in my_profile.standard_deviation])
         variance_storage = "%s_variance" % storage
         if hasattr(halo, variance_storage):
             halo_variance_store = getattr(halo, variance_storage)

diff -r fb56ba9587feaba91561d6c9572a3cfd754eb325 -r d156390fc522026f3ce91abd04acb25c79f2e58e yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -21,7 +21,8 @@
 from yt.funcs import \
     get_output_filename, \
     ensure_list, \
-    iterable
+    iterable, \
+    issue_deprecation_warning
 from yt.units.yt_array import \
     array_like_field, \
     YTQuantity
@@ -91,12 +92,22 @@
         self.field_info = {}
         self.field_data = YTFieldData()
         if weight_field is not None:
-            self.variance = YTFieldData()
+            self.standard_deviation = YTFieldData()
             weight_field = self.data_source._determine_fields(weight_field)[0]
+        else:
+            self.standard_deviation = None
         self.weight_field = weight_field
         self.field_units = {}
         ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
+    @property
+    def variance(self):
+        issue_deprecation_warning("""
+profile.variance incorrectly returns the profile standard deviation and has
+been deprecated; use profile.standard_deviation instead."""
+        )
+        return self.standard_deviation
+
     def add_fields(self, fields):
         """Add fields to profile
 
@@ -153,13 +164,13 @@
 
         all_val = np.zeros_like(temp_storage.values)
         all_mean = np.zeros_like(temp_storage.mvalues)
-        all_var = np.zeros_like(temp_storage.qvalues)
+        all_std = np.zeros_like(temp_storage.qvalues)
         all_weight = np.zeros_like(temp_storage.weight_values)
         all_used = np.zeros_like(temp_storage.used, dtype="bool")
 
-        # Combine the weighted mean and variance from each processor.
-        # For two samples with total weight, mean, and variance 
-        # given by w, m, and s, their combined mean and variance are:
+        # Combine the weighted mean and standard deviation from each processor.
+        # For two samples with total weight, mean, and standard deviation 
+        # given by w, m, and s, their combined mean and standard deviation are:
         # m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
         # s12**2 = (w1 * (s1**2 + (m1 - m12)**2) +
         #           w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2)
@@ -180,8 +191,8 @@
                    all_store[p].weight_values)[all_store[p].used] / \
                    all_weight[all_store[p].used]
 
-                all_var[..., i][all_store[p].used] = \
-                  (old_weight * (all_var[..., i] +
+                all_std[..., i][all_store[p].used] = \
+                  (old_weight * (all_std[..., i] +
                                  (old_mean[..., i] - all_mean[..., i])**2) +
                    all_store[p].weight_values *
                    (all_store[p].qvalues[..., i] + 
@@ -189,7 +200,7 @@
                      all_mean[..., i])**2))[all_store[p].used] / \
                     all_weight[all_store[p].used]
 
-        all_var = np.sqrt(all_var)
+        all_std = np.sqrt(all_std)
         del all_store
         self.used = all_used
         blank = ~all_used
@@ -206,10 +217,10 @@
                 self.field_data[field] = \
                   array_like_field(self.data_source, 
                                    all_mean[...,i], field)
-                self.variance[field] = \
+                self.standard_deviation[field] = \
                   array_like_field(self.data_source,
-                                   all_var[...,i], field)
-                self.variance[field][blank] = 0.0
+                                   all_std[...,i], field)
+                self.standard_deviation[field][blank] = 0.0
             self.field_data[field][blank] = 0.0
             self.field_units[field] = self.field_data[field].units
             if isinstance(field, tuple):


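The combination rule in the hunk above can be sanity-checked directly: split
a sample in two, combine the per-half weighted statistics with the formulas
from the comment, and compare against the statistics of the full sample. A
small NumPy sketch (all names are illustrative):

    import numpy as np

    def wstats(x, w):
        # total weight, weighted mean, weighted standard deviation
        m = (w * x).sum() / w.sum()
        s = np.sqrt((w * (x - m)**2).sum() / w.sum())
        return w.sum(), m, s

    rng = np.random.RandomState(0)
    x, w = rng.rand(100), rng.rand(100)

    w1, m1, s1 = wstats(x[:60], w[:60])
    w2, m2, s2 = wstats(x[60:], w[60:])

    m12 = (m1 * w1 + m2 * w2) / (w1 + w2)
    s12 = np.sqrt((w1 * (s1**2 + (m1 - m12)**2) +
                   w2 * (s2**2 + (m2 - m12)**2)) / (w1 + w2))

    _, m_all, s_all = wstats(x, w)
    assert np.allclose([m12, s12], [m_all, s_all])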
https://bitbucket.org/yt_analysis/yt/commits/d8c571ef10c9/
Changeset:   d8c571ef10c9
User:        ngoldbaum
Date:        2017-05-05 16:39:10+00:00
Summary:     attempt to reduce the memory consumption of the unit tests
Affected #:  3 files

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r d8c571ef10c97d67c02525720770ea387aecbc3b yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -90,7 +90,7 @@
 
 
 def test_arbitrary_grid():
-    for ncells in [64, 128, 256]:
+    for ncells in [32, 64]:
         for px in [0.125, 0.25, 0.55519]:
 
             particle_data = {

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r d8c571ef10c97d67c02525720770ea387aecbc3b yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -11,12 +11,9 @@
 import yt.utilities.flagging_methods as fm
 from yt.units.yt_array import uconcatenate
 
-def setup() :
-    pass
-
 def test_particle_generator():
     # First generate our dataset
-    domain_dims = (128, 128, 128)
+    domain_dims = (64, 64, 64)
     dens = np.zeros(domain_dims) + 0.1
     temp = 4.*np.ones(domain_dims)
     fields = {"density": (dens, 'code_mass/code_length**3'),
@@ -114,3 +111,11 @@
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
     particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+
+    del pdata
+    del ds
+    del particles1
+    del particles2
+    del fields
+    del dens
+    del temp

diff -r 57522d3847609648ece6c2238d2e9d76c8037d3b -r d8c571ef10c97d67c02525720770ea387aecbc3b yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -237,6 +237,8 @@
     def tearDown(self):
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
+        del self.ds
+        del self.slc
 
     def test_hide_show_axes(self):
         self.slc.hide_axes()
@@ -351,6 +353,17 @@
     def tearDown(self):
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
+        del self.ds
+        del self.slc
+        del self.slices
+        del self.projections
+        del self.projections_ds
+        del self.projections_c
+        del self.projections_wf
+        del self.projections_w
+        del self.projection_m
+        del self.offaxis_slice
+        del self.offaxis_proj
 
     @parameterized.expand(
         param.explicit(item)


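The explicit del statements above rely on CPython's reference counting: in a
long-lived test process, dropping the last reference to a large array frees
it immediately, before the next allocation, which lowers the peak rather than
the final memory use. A hedged illustration (sizes chosen only to make the
effect visible, not taken from the tests):

    import numpy as np

    def two_large_grids():
        a = np.zeros((256, 256, 256))   # ~128 MB of float64
        total = a.sum()
        del a                           # freed here, not at function exit
        b = np.ones((256, 256, 256))    # peak stays near one grid, not two
        return total + b.sum()

    print(two_large_grids())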
https://bitbucket.org/yt_analysis/yt/commits/400ba24a5cf2/
Changeset:   400ba24a5cf2
User:        ngoldbaum
Date:        2017-05-05 16:49:06+00:00
Summary:     try to make test_particle_generator more svelte
Affected #:  1 file

diff -r d8c571ef10c97d67c02525720770ea387aecbc3b -r 400ba24a5cf2d9d5f0d869fec6dac6af00af22c2 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -13,7 +13,7 @@
 
 def test_particle_generator():
     # First generate our dataset
-    domain_dims = (64, 64, 64)
+    domain_dims = (32, 32, 32)
     dens = np.zeros(domain_dims) + 0.1
     temp = 4.*np.ones(domain_dims)
     fields = {"density": (dens, 'code_mass/code_length**3'),
@@ -47,8 +47,11 @@
 
     tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
     assert(np.unique(tags).size == num_particles)
+
+    del tags
+    
     # Set up a lattice of particles
-    pdims = np.array([64,64,64])
+    pdims = np.array([32,32,32])
     def new_indices() :
         # We just add new indices onto the existing ones
         return np.arange((np.product(pdims)))+num_particles
@@ -75,6 +78,9 @@
     assert_almost_equal( ypos, ypred)
     assert_almost_equal( zpos, zpred)
 
+    del xpos, ypos, zpos
+    del xpred, ypred, zpred
+
     #Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
@@ -89,6 +95,8 @@
     tags.sort()
     assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
 
+    del tags
+
     # Test that the old particles have zero for the new field
     old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
                           for i, grid in enumerate(ds.index.grids)]
@@ -96,6 +104,9 @@
                   for i, grid in enumerate(ds.index.grids)]
     assert_equal(old_particle_temps, test_zeros)
 
+    del old_particle_temps
+    del test_zeros
+
     #Now dump all of these particle fields out into a dict
     pdata = {}
     dd = ds.all_data()


https://bitbucket.org/yt_analysis/yt/commits/c9cb34e46c9a/
Changeset:   c9cb34e46c9a
User:        ngoldbaum
Date:        2017-05-05 17:00:48+00:00
Summary:     try to make test_boolean_regions use less memory
Affected #:  1 file

diff -r 400ba24a5cf2d9d5f0d869fec6dac6af00af22c2 -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -280,6 +280,7 @@
     b6 = bo6["index", "morton_index"]
     b6.sort()
     assert_array_equal(b6, np.setxor1d(i1, i2))
+    del ds
 
 def test_boolean_ellipsoids_no_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, no overlap)


https://bitbucket.org/yt_analysis/yt/commits/47035307ac10/
Changeset:   47035307ac10
User:        ngoldbaum
Date:        2017-05-05 18:09:41+00:00
Summary:     more test slimming
Affected #:  7 files

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -26,7 +26,7 @@
 
     np.random.seed(seed=0x4d3d3d3)
 
-    dims = (8,8,1024)
+    dims = (8, 8, 128)
     v_shift = 1.0e7*u.cm/u.s
     sigma_v = 2.0e7*u.cm/u.s
     T_0 = 1.0e8*u.Kelvin

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -895,7 +895,7 @@
                 yield center
 
 def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
-             call_pdb = False):
+             call_pdb=False, module=None):
     from yt.utilities.on_demand_imports import _nose
     import sys
     from yt.utilities.logger import ytLogger as mylog
@@ -911,6 +911,8 @@
         nose_argv.append('--with-answer-testing')
     if answer_big_data:
         nose_argv.append('--answer-big-data')
+    if module:
+        nose_argv.append(module)
     initial_dir = os.getcwd()
     yt_file = os.path.abspath(__file__)
     yt_dir = os.path.dirname(yt_file)

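With the new keyword, a single test module can be run on its own, which also
bounds how much memory one invocation can accumulate; the string is appended
verbatim to nose's argument list, and nose accepts dotted module paths as
positional arguments. A usage sketch:

    from yt.testing import run_nose

    # Run only this module instead of the full suite.
    run_nose(module='yt.utilities.tests.test_particle_generator')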
diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -209,6 +209,11 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
+    @classmethod
+    def tearDownClass(cls):
+        del cls.ds
+        del cls.particle_phases
+
     @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
     def test_particle_phase_plot(self, fname):
         for p in self.particle_phases:
@@ -267,6 +272,15 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
+    @classmethod
+    def tearDownClass(cls):
+        del cls.pplots
+        del cls.pplots_ds
+        del cls.pplots_c
+        del cls.pplots_wf
+        del cls.pplots_w
+        del cls.pplots_m
+
     @parameterized.expand(
         param.explicit(item)
         for item in itertools.product(range(3), TEST_FLNMS))

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -266,6 +266,10 @@
             self.ds = fake_random_ds(64)
             self.slc = SlicePlot(self.ds, 0, "density")
 
+    def tearDown(self):
+        del self.ds
+        del self.slc
+
     def _assert_05cm(self):
         assert_array_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
                          [(YTQuantity(0.25, 'cm'), YTQuantity(0.75, 'cm')),
@@ -353,17 +357,19 @@
     def tearDown(self):
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
-        del self.ds
-        del self.slc
-        del self.slices
-        del self.projections
-        del self.projections_ds
-        del self.projections_c
-        del self.projections_wf
-        del self.projections_w
-        del self.projection_m
-        del self.offaxis_slice
-        del self.offaxis_proj
+
+    @classmethod
+    def tearDownClass(cls):
+        del cls.slc
+        del cls.slices
+        del cls.projections
+        del cls.projections_ds
+        del cls.projections_c
+        del cls.projections_wf
+        del cls.projections_w
+        del cls.projection_m
+        del cls.offaxis_slice
+        del cls.offaxis_proj
 
     @parameterized.expand(
         param.explicit(item)

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -123,6 +123,12 @@
         cls.phases = phases
         cls.ds = test_ds
 
+    @classmethod
+    def tearDownClass(cls):
+        del cls.profiles
+        del cls.phases
+        del cls.ds
+
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
         self.curdir = os.getcwd()

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/visualization/volume_rendering/tests/test_varia.py
--- a/yt/visualization/volume_rendering/tests/test_varia.py
+++ b/yt/visualization/volume_rendering/tests/test_varia.py
@@ -46,6 +46,7 @@
         if self.use_tmpdir:
             os.chdir(self.curdir)
             shutil.rmtree(self.tmpdir)
+        del self.ds
 
     def test_simple_scene_creation(self):
         yt.create_scene(self.ds)
@@ -72,7 +73,7 @@
         im, sc = yt.volume_render(self.ds)
 
         angle = 2 * np.pi
-        frames = 10
+        frames = 4
         for i in range(frames):
             sc.camera.yaw(angle / frames)
             sc.render()

diff -r c9cb34e46c9a2cb35a3513358d43f6bcb2037c88 -r 47035307ac10bcc65b031904da2b40b1ac12d10c yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -81,7 +81,6 @@
 
         im = sc.render()
         im.write_png("composite.png")
-        return im
 
     def test_nonrectangular_add(self):
         rgba1 = np.ones((64, 1, 4))


https://bitbucket.org/yt_analysis/yt/commits/7b90a91386ff/
Changeset:   7b90a91386ff
User:        ngoldbaum
Date:        2017-05-05 20:54:06+00:00
Summary:     get rid of setUpClass to reduce peak memory usage
Affected #:  3 files

diff -r 47035307ac10bcc65b031904da2b40b1ac12d10c -r 7b90a91386ffb717a0d5841f6a519bdec38b5f2c yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -167,8 +167,16 @@
 
 class TestParticlePhasePlotSave(unittest.TestCase):
 
-    @classmethod
-    def setUpClass(cls):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
+
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
+
+    def test_particle_phase_plot(self):
         test_ds = fake_particle_ds()
         data_sources = [test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3),
                         test_ds.all_data()]
@@ -196,9 +204,13 @@
                                     n_bins=[16, 16])
 
                 particle_phases.append(ParticlePhasePlot.from_profile(pp))
+        particle_phases[0]._repr_html_()
+        for p in particle_phases:
+            for fname in TEST_FLNMS:
+                assert assert_fname(p.save(fname)[0])
 
-        cls.particle_phases = particle_phases
-        cls.ds = test_ds
+
+class TestParticleProjectionPlotSave(unittest.TestCase):
 
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
@@ -209,107 +221,47 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
-    @classmethod
-    def tearDownClass(cls):
-        del cls.ds
-        del cls.particle_phases
+    def test_particle_plot(self):
+        test_ds = fake_particle_ds()
+        for dim in range(3):
+            pplot = ParticleProjectionPlot(test_ds, dim, "particle_mass")
+            for fname in TEST_FLNMS:
+                assert assert_fname(pplot.save(fname)[0])
 
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_particle_phase_plot(self, fname):
-        for p in self.particle_phases:
-            assert assert_fname(p.save(fname)[0])
-
-    def test_ipython_repr(self):
-        self.particle_phases[0]._repr_html_()
-
-
-class TestParticleProjectionPlotSave(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
+    def test_particle_plot_ds(self):
         test_ds = fake_particle_ds()
         ds_region = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
-        pplots = []
-        pplots_ds = []
-        pplots_c = []
-        pplots_wf = []
-        pplots_w = {}
-        pplots_m = []
         for dim in range(3):
-            pplots.append(ParticleProjectionPlot(test_ds, dim,
-                                                 "particle_mass"))
-
-            pplots_ds.append(ParticleProjectionPlot(test_ds, dim,
-                                                    "particle_mass",
-                                                    data_source=ds_region))
-
-        for center in CENTER_SPECS:
-            pplots_c.append(ParticleProjectionPlot(test_ds, dim,
-                                                   "particle_mass",
-                                                    center=center))
+            pplot_ds = ParticleProjectionPlot(
+                test_ds, dim, "particle_mass", data_source=ds_region)
+            pplot_ds.save()
 
-        for width in WIDTH_SPECS:
-            pplots_w[width] = ParticleProjectionPlot(test_ds, dim,
-                                                     'particle_mass',
-                                                     width=width)
-        for wf in WEIGHT_FIELDS:
-            pplots_wf.append(ParticleProjectionPlot(test_ds, dim,
-                                                    "particle_mass",
-                                                    weight_field=wf))
-        cls.pplots = pplots
-        cls.pplots_ds = pplots_ds
-        cls.pplots_c = pplots_c
-        cls.pplots_wf = pplots_wf
-        cls.pplots_w = pplots_w
-        cls.pplots_m = pplots_m
-
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp()
-        self.curdir = os.getcwd()
-        os.chdir(self.tmpdir)
+    def test_particle_plot_c(self):
+        test_ds = fake_particle_ds()
+        for center in CENTER_SPECS:
+            for dim in range(3):
+                pplot_c = ParticleProjectionPlot(
+                    test_ds, dim, "particle_mass", center=center)
+                pplot_c.save()
 
-    def tearDown(self):
-        os.chdir(self.curdir)
-        shutil.rmtree(self.tmpdir)
-
-    @classmethod
-    def tearDownClass(cls):
-        del cls.pplots
-        del cls.pplots_ds
-        del cls.pplots_c
-        del cls.pplots_wf
-        del cls.pplots_w
-        del cls.pplots_m
-
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_particle_plot(self, dim, fname):
-        assert assert_fname(self.pplots[dim].save(fname)[0])
-
-    @parameterized.expand([(0, ), (1, ), (2, )])
-    def test_particle_plot_ds(self, dim):
-        self.pplots_ds[dim].save()
+    def test_particle_plot_wf(self):
+        test_ds = fake_particle_ds()
+        for dim in range(3):
+            for weight_field in WEIGHT_FIELDS:
+                pplot_wf = ParticleProjectionPlot(
+                    test_ds, dim, "particle_mass", weight_field=weight_field)
+                pplot_wf.save()
 
-    @parameterized.expand([(i, ) for i in range(len(CENTER_SPECS))])
-    def test_particle_plot_c(self, dim):
-        self.pplots_c[dim].save()
-
-    @parameterized.expand([(i, ) for i in range(len(WEIGHT_FIELDS))])
-    def test_particle_plot_wf(self, dim):
-        self.pplots_wf[dim].save()
+    def test_creation_with_width(self):
+        test_ds = fake_particle_ds()
+        for width, (xlim, ylim, pwidth, aun) in WIDTH_SPECS.items():
+            plot = ParticleProjectionPlot(
+                test_ds, 0, 'particle_mass', width=width)
 
-    @parameterized.expand(
-        param.explicit((width, ))
-        for width in WIDTH_SPECS)
-    def test_creation_with_width(self, width):
-        xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
-        plot = self.pplots_w[width]
+            xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
+            ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
+            pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
 
-        xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
-        ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
-        pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
-
-        [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-        [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-        [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
+            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
+            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]

diff -r 47035307ac10bcc65b031904da2b40b1ac12d10c -r 7b90a91386ffb717a0d5841f6a519bdec38b5f2c yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -310,45 +310,7 @@
 
 
 class TestPlotWindowSave(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        test_ds = fake_random_ds(64)
-        normal = [1, 1, 1]
-        ds_region = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
-        projections = []
-        projections_ds = []
-        projections_c = []
-        projections_wf = []
-        projections_w = {}
-        projections_m = []
-        for dim in range(3):
-            projections.append(ProjectionPlot(test_ds, dim, "density"))
-            projections_ds.append(ProjectionPlot(test_ds, dim, "density",
-                                                 data_source=ds_region))
-        for center in CENTER_SPECS:
-            projections_c.append(ProjectionPlot(test_ds, dim, "density",
-                                                center=center))
-        for width in WIDTH_SPECS:
-            projections_w[width] = ProjectionPlot(test_ds, dim, 'density',
-                                                  width=width)
-        for wf in WEIGHT_FIELDS:
-            projections_wf.append(ProjectionPlot(test_ds, dim, "density",
-                                                 weight_field=wf))
-        for m in PROJECTION_METHODS:
-            projections_m.append(ProjectionPlot(test_ds, dim, "density",
-                                                 method=m))
-
-        cls.slices = [SlicePlot(test_ds, dim, "density") for dim in range(3)]
-        cls.projections = projections
-        cls.projections_ds = projections_ds
-        cls.projections_c = projections_c
-        cls.projections_wf = projections_wf
-        cls.projections_w = projections_w
-        cls.projections_m = projections_m
-        cls.offaxis_slice = OffAxisSlicePlot(test_ds, normal, "density")
-        cls.offaxis_proj = OffAxisProjectionPlot(test_ds, normal, "density")
-
+        
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
         self.curdir = os.getcwd()
@@ -358,77 +320,76 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
-    @classmethod
-    def tearDownClass(cls):
-        del cls.slc
-        del cls.slices
-        del cls.projections
-        del cls.projections_ds
-        del cls.projections_c
-        del cls.projections_wf
-        del cls.projections_w
-        del cls.projection_m
-        del cls.offaxis_slice
-        del cls.offaxis_proj
+    def test_slice_plot(self):
+        test_ds = fake_random_ds(16)
+        for dim in range(3):
+            slc = SlicePlot(test_ds, dim, 'density')
+            for fname in TEST_FLNMS:
+                assert assert_fname(slc.save(fname)[0])
+
+    def test_repr_html(self):
+        test_ds = fake_random_ds(16)
+        slc = SlicePlot(test_ds, 0, 'density')
+        slc._repr_html_()
 
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_slice_plot(self, dim, fname):
-        assert assert_fname(self.slices[dim].save(fname)[0])
+    def test_projection_plot(self):
+        test_ds = fake_random_ds(16)
+        for dim in range(3):
+            proj = ProjectionPlot(test_ds, dim, 'density')
+            for fname in TEST_FLNMS:
+                assert assert_fname(proj.save(fname)[0])
 
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_projection_plot(self, dim, fname):
-        assert assert_fname(self.projections[dim].save(fname)[0])
+    def test_projection_plot_ds(self):
+        test_ds = fake_random_ds(16)
+        reg = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
+        for dim in range(3):
+            proj = ProjectionPlot(test_ds, dim, 'density', data_source=reg)
+            proj.save()
 
-    @parameterized.expand([(0, ), (1, ), (2, )])
-    def test_projection_plot_ds(self, dim):
-        self.projections_ds[dim].save()
-
-    @parameterized.expand([(i, ) for i in range(len(CENTER_SPECS))])
-    def test_projection_plot_c(self, dim):
-        self.projections_c[dim].save()
+    def test_projection_plot_c(self):
+        test_ds = fake_random_ds(16)
+        for center in CENTER_SPECS:
+            proj = ProjectionPlot(test_ds, 0, 'density', center=center)
+            proj.save()
 
-    @parameterized.expand([(i, ) for i in range(len(WEIGHT_FIELDS))])
-    def test_projection_plot_wf(self, dim):
-        self.projections_wf[dim].save()
+    def test_projection_plot_wf(self):
+        test_ds = fake_random_ds(16)
+        for wf in WEIGHT_FIELDS:
+            proj = ProjectionPlot(test_ds, 0, 'density', weight_field=wf)
+            proj.save()
 
-    @parameterized.expand([(i, ) for i in range(len(PROJECTION_METHODS))])
-    def test_projection_plot_m(self, dim):
-        self.projections_m[dim].save()
+    def test_projection_plot_m(self):
+        test_ds = fake_random_ds(16)
+        for method in PROJECTION_METHODS:
+            proj = ProjectionPlot(test_ds, 0, 'density', method=method)
+            proj.save()
 
-    @parameterized.expand(
-        param.explicit((fname, ))
-        for fname in TEST_FLNMS)
-    def test_offaxis_slice_plot(self, fname):
-        assert assert_fname(self.offaxis_slice.save(fname)[0])
+    def test_offaxis_slice_plot(self):
+        test_ds = fake_random_ds(16)
+        slc = OffAxisSlicePlot(test_ds, [1, 1, 1], "density")
+        for fname in TEST_FLNMS:
+            assert assert_fname(slc.save(fname)[0])
 
-    @parameterized.expand(
-        param.explicit((fname, ))
-        for fname in TEST_FLNMS)
-    def test_offaxis_projection_plot(self, fname):
-        assert assert_fname(self.offaxis_proj.save(fname)[0])
-
-    def test_ipython_repr(self):
-        self.slices[0]._repr_html_()
+    def test_offaxis_projection_plot(self):
+        test_ds = fake_random_ds(16)
+        prj = OffAxisProjectionPlot(test_ds, [1, 1, 1], "density")
+        for fname in TEST_FLNMS:
+            assert assert_fname(prj.save(fname)[0])
 
-    @parameterized.expand(
-        param.explicit((width, ))
-        for width in WIDTH_SPECS)
-    def test_creation_with_width(self, width):
-        xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
-        plot = self.projections_w[width]
+    def test_creation_with_width(self):
+        test_ds = fake_random_ds(16)
+        for width in WIDTH_SPECS:
+            xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
+            plot = ProjectionPlot(test_ds, 0, 'density', width=width)
 
-        xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
-        ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
-        pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
+            xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
+            ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
+            pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
 
-        [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-        [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-        [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
-        assert_true(aun == plot._axes_unit_names)
+            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
+            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
+            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            assert_true(aun == plot._axes_unit_names)
 
 def test_on_off_compare():
     # fake density field that varies in the x-direction only

diff -r 47035307ac10bcc65b031904da2b40b1ac12d10c -r 7b90a91386ffb717a0d5841f6a519bdec38b5f2c yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -71,21 +71,25 @@
 
 class TestProfilePlotSave(unittest.TestCase):
 
-    @classmethod
-    def setUpClass(cls):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
+
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
+
+    def test_profile_plot(self):
         fields = ('density', 'temperature', 'velocity_x', 'velocity_y',
                   'velocity_z')
         units = ('g/cm**3', 'K', 'cm/s', 'cm/s', 'cm/s')
-        test_ds = fake_random_ds(64, fields=fields, units=units)
+        test_ds = fake_random_ds(16, fields=fields, units=units)
         regions = [test_ds.region([0.5]*3, [0.4]*3, [0.6]*3), test_ds.all_data()]
-        profiles = []
-        phases = []
         pr_fields = [('density', 'temperature'), ('density', 'velocity_x'),
                      ('temperature', 'cell_mass'), ('density', 'radius'),
                      ('velocity_magnitude', 'cell_mass')]
-        ph_fields = [('density', 'temperature', 'cell_mass'),
-                     ('density', 'velocity_x', 'cell_mass'),
-                     ('radius', 'temperature', 'velocity_magnitude')]
+        profiles = []
         for reg in regions:
             for x_field, y_field in pr_fields:
                 profiles.append(ProfilePlot(reg, x_field, y_field))
@@ -93,6 +97,26 @@
                                             fractional=True, accumulation=True))
                 p1d = create_profile(reg, x_field, y_field)
                 profiles.append(ProfilePlot.from_profiles(p1d))
+        p1 = create_profile(test_ds.all_data(), 'density', 'temperature')
+        p2 = create_profile(test_ds.all_data(), 'density', 'velocity_x')
+        profiles.append(ProfilePlot.from_profiles(
+            [p1, p2], labels=['temperature', 'velocity']))
+        profiles[0]._repr_html_()
+        for p in profiles:
+            for fname in TEST_FLNMS:
+                assert_fname(p.save(fname)[0])
+
+    def test_phase_plot(self):
+        fields = ('density', 'temperature', 'velocity_x', 'velocity_y',
+                  'velocity_z')
+        units = ('g/cm**3', 'K', 'cm/s', 'cm/s', 'cm/s')
+        test_ds = fake_random_ds(16, fields=fields, units=units)
+        regions = [test_ds.region([0.5]*3, [0.4]*3, [0.6]*3), test_ds.all_data()]
+        phases = []
+        ph_fields = [('density', 'temperature', 'cell_mass'),
+                     ('density', 'velocity_x', 'cell_mass'),
+                     ('radius', 'temperature', 'velocity_magnitude')]
+        for reg in regions:
             for x_field, y_field, z_field in ph_fields:
                 # set n_bins to [16, 16] since matplotlib's postscript
                 # renderer is slow when it has to write a lot of polygons
@@ -113,45 +137,10 @@
         assert_array_almost_equal(xlim, (0.3, 0.8))
         assert_array_almost_equal(ylim, (0.4, 0.6))
         phases.append(pp)
-
-        p1 = create_profile(test_ds.all_data(), 'density', 'temperature')
-        p2 = create_profile(test_ds.all_data(), 'density', 'velocity_x')
-        profiles.append(ProfilePlot.from_profiles(
-            [p1, p2], labels=['temperature', 'velocity']))
-
-        cls.profiles = profiles
-        cls.phases = phases
-        cls.ds = test_ds
-
-    @classmethod
-    def tearDownClass(cls):
-        del cls.profiles
-        del cls.phases
-        del cls.ds
-
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp()
-        self.curdir = os.getcwd()
-        os.chdir(self.tmpdir)
-
-    def tearDown(self):
-        os.chdir(self.curdir)
-        shutil.rmtree(self.tmpdir)
-
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_profile_plot(self, fname):
-        for p in self.profiles:
-            assert_fname(p.save(fname)[0])
-
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_phase_plot(self, fname):
-        for p in self.phases:
-            assert_fname(p.save(fname)[0])
-
-    def test_ipython_repr(self):
-        self.profiles[0]._repr_html_()
-        self.phases[0]._repr_html_()
-
+        phases[0]._repr_html_()
+        for p in phases:
+            for fname in TEST_FLNMS:
+                assert_fname(p.save(fname)[0])
 
 ETC46 = "enzo_tiny_cosmology/DD0046/DD0046"
 


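The refactoring above follows one pattern: unittest keeps anything created in
setUpClass alive until tearDownClass, so every plot and dataset built there
coexists in memory for the whole class; building fixtures inside each test
lets them be collected as soon as that test returns. A minimal sketch of the
pattern (generic, not yt code):

    import unittest
    import numpy as np

    class TestPerTestFixture(unittest.TestCase):
        # Each test builds (and implicitly releases) its own data, so peak
        # memory is one fixture rather than the sum of all of them.
        def test_sum(self):
            data = np.ones((64, 64, 64))
            self.assertEqual(data.sum(), 64**3)

        def test_mean(self):
            data = np.ones((64, 64, 64))
            self.assertEqual(data.mean(), 1.0)

    if __name__ == '__main__':
        unittest.main()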
https://bitbucket.org/yt_analysis/yt/commits/df9593e481cb/
Changeset:   df9593e481cb
User:        ngoldbaum
Date:        2017-05-05 22:06:59+00:00
Summary:     attempt to fix VR memory leak, still broken
Affected #:  2 files

diff -r 7b90a91386ffb717a0d5841f6a519bdec38b5f2c -r df9593e481cb4ada83c78137598f6b7d39c9253b yt/utilities/lib/image_samplers.pyx
--- a/yt/utilities/lib/image_samplers.pyx
+++ b/yt/utilities/lib/image_samplers.pyx
@@ -216,8 +216,16 @@
         self.image.vp_pos = None
         self.image.vp_dir = None
         self.image.zbuffer = None
+        self.image.image_used = None
+        self.image.mesh_lines = None
         self.image.camera_data = None
-        self.image.image_used = None
+        self.aimage = None
+        self.acenter = None
+        self.ax_vec = None
+        self.ay_vec = None
+        self.azbuffer = None
+        self.aimage_used = None
+        self.amesh_lines = None
         free(self.image)
 
 cdef class ProjectionSampler(ImageSampler):

diff -r 7b90a91386ffb717a0d5841f6a519bdec38b5f2c -r df9593e481cb4ada83c78137598f6b7d39c9253b yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -48,8 +48,8 @@
             shutil.rmtree(self.tmpdir)
 
     def test_rotation(self):
-        ds = fake_random_ds(64)
-        ds2 = fake_random_ds(64)
+        ds = fake_random_ds(32)
+        ds2 = fake_random_ds(32)
         dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
         dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
 


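The image_samplers change nulls every attribute that still references the
image buffers before free() is called, so the memoryviews no longer pin the
underlying arrays. The same idea in pure Python, checked with a weak
reference (assumes CPython's reference counting; the class is a stand-in,
not the real sampler):

    import weakref
    import numpy as np

    class Sampler:
        def __init__(self):
            # stands in for aimage/azbuffer/... in the pyx file
            self.aimage = np.zeros((16, 16, 4))

        def teardown(self):
            self.aimage = None   # drop the last reference to the buffer

    s = Sampler()
    probe = weakref.ref(s.aimage)
    s.teardown()
    assert probe() is None   # the buffer really was released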
https://bitbucket.org/yt_analysis/yt/commits/70ffce795e61/
Changeset:   70ffce795e61
User:        ngoldbaum
Date:        2017-05-06 02:21:22+00:00
Summary:     Fix flake8
Affected #:  3 files

diff -r df9593e481cb4ada83c78137598f6b7d39c9253b -r 70ffce795e617a5802e95f6e2d8c58cb494bc23d yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -12,13 +12,11 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import itertools
 import os
 import tempfile
 import shutil
 import unittest
 from yt.data_objects.profiles import create_profile
-from yt.extern.parameterized import parameterized, param
 from yt.visualization.tests.test_plotwindow import \
     assert_fname, WIDTH_SPECS, ATTR_ARGS
 from yt.data_objects.particle_filters import add_particle_filter

diff -r df9593e481cb4ada83c78137598f6b7d39c9253b -r 70ffce795e617a5802e95f6e2d8c58cb494bc23d yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -12,7 +12,6 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import itertools
 import matplotlib
 import numpy as np
 import os
@@ -23,7 +22,6 @@
 from distutils.version import LooseVersion
 from nose.tools import assert_true
 
-from yt.extern.parameterized import parameterized, param
 from yt.testing import \
     fake_random_ds, assert_equal, assert_rel_equal, assert_array_equal, \
     assert_array_almost_equal, assert_raises

diff -r df9593e481cb4ada83c78137598f6b7d39c9253b -r 70ffce795e617a5802e95f6e2d8c58cb494bc23d yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -18,8 +18,6 @@
 import unittest
 import yt
 from yt.data_objects.profiles import create_profile
-from yt.extern.parameterized import\
-    parameterized, param
 from yt.testing import \
     fake_random_ds, \
     assert_array_almost_equal, \


https://bitbucket.org/yt_analysis/yt/commits/7b4559b80150/
Changeset:   7b4559b80150
User:        ngoldbaum
Date:        2017-05-08 20:23:49+00:00
Summary:     Merge pull request #1373 from ngoldbaum/memory-tests

Reduce the memory usage of the unit tests
Affected #:  12 files

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -26,7 +26,7 @@
 
     np.random.seed(seed=0x4d3d3d3)
 
-    dims = (8,8,1024)
+    dims = (8, 8, 128)
     v_shift = 1.0e7*u.cm/u.s
     sigma_v = 2.0e7*u.cm/u.s
     T_0 = 1.0e8*u.Kelvin

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -280,6 +280,7 @@
     b6 = bo6["index", "morton_index"]
     b6.sort()
     assert_array_equal(b6, np.setxor1d(i1, i2))
+    del ds
 
 def test_boolean_ellipsoids_no_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, no overlap)

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -90,7 +90,7 @@
 
 
 def test_arbitrary_grid():
-    for ncells in [64, 128, 256]:
+    for ncells in [32, 64]:
         for px in [0.125, 0.25, 0.55519]:
 
             particle_data = {

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -895,7 +895,7 @@
                 yield center
 
 def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
-             call_pdb = False):
+             call_pdb=False, module=None):
     from yt.utilities.on_demand_imports import _nose
     import sys
     from yt.utilities.logger import ytLogger as mylog
@@ -911,6 +911,8 @@
         nose_argv.append('--with-answer-testing')
     if answer_big_data:
         nose_argv.append('--answer-big-data')
+    if module:
+        nose_argv.append(module)
     initial_dir = os.getcwd()
     yt_file = os.path.abspath(__file__)
     yt_dir = os.path.dirname(yt_file)

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/utilities/lib/image_samplers.pyx
--- a/yt/utilities/lib/image_samplers.pyx
+++ b/yt/utilities/lib/image_samplers.pyx
@@ -216,8 +216,16 @@
         self.image.vp_pos = None
         self.image.vp_dir = None
         self.image.zbuffer = None
+        self.image.image_used = None
+        self.image.mesh_lines = None
         self.image.camera_data = None
-        self.image.image_used = None
+        self.aimage = None
+        self.acenter = None
+        self.ax_vec = None
+        self.ay_vec = None
+        self.azbuffer = None
+        self.aimage_used = None
+        self.amesh_lines = None
         free(self.image)
 
 cdef class ProjectionSampler(ImageSampler):

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -11,12 +11,9 @@
 import yt.utilities.flagging_methods as fm
 from yt.units.yt_array import uconcatenate
 
-def setup() :
-    pass
-
 def test_particle_generator():
     # First generate our dataset
-    domain_dims = (128, 128, 128)
+    domain_dims = (32, 32, 32)
     dens = np.zeros(domain_dims) + 0.1
     temp = 4.*np.ones(domain_dims)
     fields = {"density": (dens, 'code_mass/code_length**3'),
@@ -50,8 +47,11 @@
 
     tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
     assert(np.unique(tags).size == num_particles)
+
+    del tags
+    
     # Set up a lattice of particles
-    pdims = np.array([64,64,64])
+    pdims = np.array([32,32,32])
     def new_indices() :
         # We just add new indices onto the existing ones
         return np.arange((np.product(pdims)))+num_particles
@@ -78,6 +78,9 @@
     assert_almost_equal( ypos, ypred)
     assert_almost_equal( zpos, zpred)
 
+    del xpos, ypos, zpos
+    del xpred, ypred, zpred
+
     #Test the number of particles again
     particles2.apply_to_stream()
     particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
@@ -92,6 +95,8 @@
     tags.sort()
     assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
 
+    del tags
+
     # Test that the old particles have zero for the new field
     old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
                           for i, grid in enumerate(ds.index.grids)]
@@ -99,6 +104,9 @@
                   for i, grid in enumerate(ds.index.grids)]
     assert_equal(old_particle_temps, test_zeros)
 
+    del old_particle_temps
+    del test_zeros
+
     #Now dump all of these particle fields out into a dict
     pdata = {}
     dd = ds.all_data()
@@ -114,3 +122,11 @@
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
     particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
     assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+
+    del pdata
+    del ds
+    del particles1
+    del particles2
+    del fields
+    del dens
+    del temp

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -12,13 +12,11 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import itertools
 import os
 import tempfile
 import shutil
 import unittest
 from yt.data_objects.profiles import create_profile
-from yt.extern.parameterized import parameterized, param
 from yt.visualization.tests.test_plotwindow import \
     assert_fname, WIDTH_SPECS, ATTR_ARGS
 from yt.data_objects.particle_filters import add_particle_filter
@@ -167,8 +165,16 @@
 
 class TestParticlePhasePlotSave(unittest.TestCase):
 
-    @classmethod
-    def setUpClass(cls):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
+
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
+
+    def test_particle_phase_plot(self):
         test_ds = fake_particle_ds()
         data_sources = [test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3),
                         test_ds.all_data()]
@@ -196,9 +202,13 @@
                                     n_bins=[16, 16])
 
                 particle_phases.append(ParticlePhasePlot.from_profile(pp))
+        particle_phases[0]._repr_html_()
+        for p in particle_phases:
+            for fname in TEST_FLNMS:
+                assert assert_fname(p.save(fname)[0])
 
-        cls.particle_phases = particle_phases
-        cls.ds = test_ds
+
+class TestParticleProjectionPlotSave(unittest.TestCase):
 
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
@@ -209,93 +219,47 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_particle_phase_plot(self, fname):
-        for p in self.particle_phases:
-            assert assert_fname(p.save(fname)[0])
+    def test_particle_plot(self):
+        test_ds = fake_particle_ds()
+        for dim in range(3):
+            pplot = ParticleProjectionPlot(test_ds, dim, "particle_mass")
+            for fname in TEST_FLNMS:
+                assert assert_fname(pplot.save(fname)[0])
 
-    def test_ipython_repr(self):
-        self.particle_phases[0]._repr_html_()
-
-
-class TestParticleProjectionPlotSave(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
+    def test_particle_plot_ds(self):
         test_ds = fake_particle_ds()
         ds_region = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
-        pplots = []
-        pplots_ds = []
-        pplots_c = []
-        pplots_wf = []
-        pplots_w = {}
-        pplots_m = []
         for dim in range(3):
-            pplots.append(ParticleProjectionPlot(test_ds, dim,
-                                                 "particle_mass"))
+            pplot_ds = ParticleProjectionPlot(
+                test_ds, dim, "particle_mass", data_source=ds_region)
+            pplot_ds.save()
 
-            pplots_ds.append(ParticleProjectionPlot(test_ds, dim,
-                                                    "particle_mass",
-                                                    data_source=ds_region))
-
+    def test_particle_plot_c(self):
+        test_ds = fake_particle_ds()
         for center in CENTER_SPECS:
-            pplots_c.append(ParticleProjectionPlot(test_ds, dim,
-                                                   "particle_mass",
-                                                    center=center))
-
-        for width in WIDTH_SPECS:
-            pplots_w[width] = ParticleProjectionPlot(test_ds, dim,
-                                                     'particle_mass',
-                                                     width=width)
-        for wf in WEIGHT_FIELDS:
-            pplots_wf.append(ParticleProjectionPlot(test_ds, dim,
-                                                    "particle_mass",
-                                                    weight_field=wf))
-        cls.pplots = pplots
-        cls.pplots_ds = pplots_ds
-        cls.pplots_c = pplots_c
-        cls.pplots_wf = pplots_wf
-        cls.pplots_w = pplots_w
-        cls.pplots_m = pplots_m
+            for dim in range(3):
+                pplot_c = ParticleProjectionPlot(
+                    test_ds, dim, "particle_mass", center=center)
+                pplot_c.save()
 
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp()
-        self.curdir = os.getcwd()
-        os.chdir(self.tmpdir)
-
-    def tearDown(self):
-        os.chdir(self.curdir)
-        shutil.rmtree(self.tmpdir)
-
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_particle_plot(self, dim, fname):
-        assert assert_fname(self.pplots[dim].save(fname)[0])
-
-    @parameterized.expand([(0, ), (1, ), (2, )])
-    def test_particle_plot_ds(self, dim):
-        self.pplots_ds[dim].save()
+    def test_particle_plot_wf(self):
+        test_ds = fake_particle_ds()
+        for dim in range(3):
+            for weight_field in WEIGHT_FIELDS:
+                pplot_wf = ParticleProjectionPlot(
+                    test_ds, dim, "particle_mass", weight_field=weight_field)
+                pplot_wf.save()
 
-    @parameterized.expand([(i, ) for i in range(len(CENTER_SPECS))])
-    def test_particle_plot_c(self, dim):
-        self.pplots_c[dim].save()
-
-    @parameterized.expand([(i, ) for i in range(len(WEIGHT_FIELDS))])
-    def test_particle_plot_wf(self, dim):
-        self.pplots_wf[dim].save()
+    def test_creation_with_width(self):
+        test_ds = fake_particle_ds()
+        for width, (xlim, ylim, pwidth, aun) in WIDTH_SPECS.items():
+            plot = ParticleProjectionPlot(
+                test_ds, 0, 'particle_mass', width=width)
 
-    @parameterized.expand(
-        param.explicit((width, ))
-        for width in WIDTH_SPECS)
-    def test_creation_with_width(self, width):
-        xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
-        plot = self.pplots_w[width]
+            xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
+            ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
+            pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
 
-        xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
-        ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
-        pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
-
-        [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-        [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-        [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
+            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
+            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -12,7 +12,6 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import itertools
 import matplotlib
 import numpy as np
 import os
@@ -23,7 +22,6 @@
 from distutils.version import LooseVersion
 from nose.tools import assert_true
 
-from yt.extern.parameterized import parameterized, param
 from yt.testing import \
     fake_random_ds, assert_equal, assert_rel_equal, assert_array_equal, \
     assert_array_almost_equal, assert_raises
@@ -237,6 +235,8 @@
     def tearDown(self):
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
+        del self.ds
+        del self.slc
 
     def test_hide_show_axes(self):
         self.slc.hide_axes()
@@ -264,6 +264,10 @@
             self.ds = fake_random_ds(64)
             self.slc = SlicePlot(self.ds, 0, "density")
 
+    def tearDown(self):
+        del self.ds
+        del self.slc
+
     def _assert_05cm(self):
         assert_array_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
                          [(YTQuantity(0.25, 'cm'), YTQuantity(0.75, 'cm')),
@@ -304,45 +308,7 @@
 
 
 class TestPlotWindowSave(unittest.TestCase):
-
-    @classmethod
-    def setUpClass(cls):
-        test_ds = fake_random_ds(64)
-        normal = [1, 1, 1]
-        ds_region = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
-        projections = []
-        projections_ds = []
-        projections_c = []
-        projections_wf = []
-        projections_w = {}
-        projections_m = []
-        for dim in range(3):
-            projections.append(ProjectionPlot(test_ds, dim, "density"))
-            projections_ds.append(ProjectionPlot(test_ds, dim, "density",
-                                                 data_source=ds_region))
-        for center in CENTER_SPECS:
-            projections_c.append(ProjectionPlot(test_ds, dim, "density",
-                                                center=center))
-        for width in WIDTH_SPECS:
-            projections_w[width] = ProjectionPlot(test_ds, dim, 'density',
-                                                  width=width)
-        for wf in WEIGHT_FIELDS:
-            projections_wf.append(ProjectionPlot(test_ds, dim, "density",
-                                                 weight_field=wf))
-        for m in PROJECTION_METHODS:
-            projections_m.append(ProjectionPlot(test_ds, dim, "density",
-                                                 method=m))
-
-        cls.slices = [SlicePlot(test_ds, dim, "density") for dim in range(3)]
-        cls.projections = projections
-        cls.projections_ds = projections_ds
-        cls.projections_c = projections_c
-        cls.projections_wf = projections_wf
-        cls.projections_w = projections_w
-        cls.projections_m = projections_m
-        cls.offaxis_slice = OffAxisSlicePlot(test_ds, normal, "density")
-        cls.offaxis_proj = OffAxisProjectionPlot(test_ds, normal, "density")
-
+        
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
         self.curdir = os.getcwd()
@@ -352,64 +318,76 @@
         os.chdir(self.curdir)
         shutil.rmtree(self.tmpdir)
 
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_slice_plot(self, dim, fname):
-        assert assert_fname(self.slices[dim].save(fname)[0])
+    def test_slice_plot(self):
+        test_ds = fake_random_ds(16)
+        for dim in range(3):
+            slc = SlicePlot(test_ds, dim, 'density')
+            for fname in TEST_FLNMS:
+                assert assert_fname(slc.save(fname)[0])
 
-    @parameterized.expand(
-        param.explicit(item)
-        for item in itertools.product(range(3), TEST_FLNMS))
-    def test_projection_plot(self, dim, fname):
-        assert assert_fname(self.projections[dim].save(fname)[0])
+    def test_repr_html(self):
+        test_ds = fake_random_ds(16)
+        slc = SlicePlot(test_ds, 0, 'density')
+        slc._repr_html_()
 
-    @parameterized.expand([(0, ), (1, ), (2, )])
-    def test_projection_plot_ds(self, dim):
-        self.projections_ds[dim].save()
-
-    @parameterized.expand([(i, ) for i in range(len(CENTER_SPECS))])
-    def test_projection_plot_c(self, dim):
-        self.projections_c[dim].save()
+    def test_projection_plot(self):
+        test_ds = fake_random_ds(16)
+        for dim in range(3):
+            proj = ProjectionPlot(test_ds, dim, 'density')
+            for fname in TEST_FLNMS:
+                assert assert_fname(proj.save(fname)[0])
 
-    @parameterized.expand([(i, ) for i in range(len(WEIGHT_FIELDS))])
-    def test_projection_plot_wf(self, dim):
-        self.projections_wf[dim].save()
+    def test_projection_plot_ds(self):
+        test_ds = fake_random_ds(16)
+        reg = test_ds.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
+        for dim in range(3):
+            proj = ProjectionPlot(test_ds, dim, 'density', data_source=reg)
+            proj.save()
 
-    @parameterized.expand([(i, ) for i in range(len(PROJECTION_METHODS))])
-    def test_projection_plot_m(self, dim):
-        self.projections_m[dim].save()
+    def test_projection_plot_c(self):
+        test_ds = fake_random_ds(16)
+        for center in CENTER_SPECS:
+            proj = ProjectionPlot(test_ds, 0, 'density', center=center)
+            proj.save()
 
-    @parameterized.expand(
-        param.explicit((fname, ))
-        for fname in TEST_FLNMS)
-    def test_offaxis_slice_plot(self, fname):
-        assert assert_fname(self.offaxis_slice.save(fname)[0])
+    def test_projection_plot_wf(self):
+        test_ds = fake_random_ds(16)
+        for wf in WEIGHT_FIELDS:
+            proj = ProjectionPlot(test_ds, 0, 'density', weight_field=wf)
+            proj.save()
 
-    @parameterized.expand(
-        param.explicit((fname, ))
-        for fname in TEST_FLNMS)
-    def test_offaxis_projection_plot(self, fname):
-        assert assert_fname(self.offaxis_proj.save(fname)[0])
+    def test_projection_plot_m(self):
+        test_ds = fake_random_ds(16)
+        for method in PROJECTION_METHODS:
+            proj = ProjectionPlot(test_ds, 0, 'density', method=method)
+            proj.save()
 
-    def test_ipython_repr(self):
-        self.slices[0]._repr_html_()
+    def test_offaxis_slice_plot(self):
+        test_ds = fake_random_ds(16)
+        slc = OffAxisSlicePlot(test_ds, [1, 1, 1], "density")
+        for fname in TEST_FLNMS:
+            assert assert_fname(slc.save(fname)[0])
 
-    @parameterized.expand(
-        param.explicit((width, ))
-        for width in WIDTH_SPECS)
-    def test_creation_with_width(self, width):
-        xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
-        plot = self.projections_w[width]
+    def test_offaxis_projection_plot(self):
+        test_ds = fake_random_ds(16)
+        prj = OffAxisProjectionPlot(test_ds, [1, 1, 1], "density")
+        for fname in TEST_FLNMS:
+            assert assert_fname(prj.save(fname)[0])
 
-        xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
-        ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
-        pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
+    def test_creation_with_width(self):
+        test_ds = fake_random_ds(16)
+        for width in WIDTH_SPECS:
+            xlim, ylim, pwidth, aun = WIDTH_SPECS[width]
+            plot = ProjectionPlot(test_ds, 0, 'density', width=width)
 
-        [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-        [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-        [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
-        assert_true(aun == plot._axes_unit_names)
+            xlim = [plot.ds.quan(el[0], el[1]) for el in xlim]
+            ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
+            pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
+
+            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
+            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
+            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            assert_true(aun == plot._axes_unit_names)
 
 def test_on_off_compare():
     # fake density field that varies in the x-direction only

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -18,8 +18,6 @@
 import unittest
 import yt
 from yt.data_objects.profiles import create_profile
-from yt.extern.parameterized import\
-    parameterized, param
 from yt.testing import \
     fake_random_ds, \
     assert_array_almost_equal, \
@@ -71,21 +69,25 @@
 
 class TestProfilePlotSave(unittest.TestCase):
 
-    @classmethod
-    def setUpClass(cls):
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
+
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
+
+    def test_profile_plot(self):
         fields = ('density', 'temperature', 'velocity_x', 'velocity_y',
                   'velocity_z')
         units = ('g/cm**3', 'K', 'cm/s', 'cm/s', 'cm/s')
-        test_ds = fake_random_ds(64, fields=fields, units=units)
+        test_ds = fake_random_ds(16, fields=fields, units=units)
         regions = [test_ds.region([0.5]*3, [0.4]*3, [0.6]*3), test_ds.all_data()]
-        profiles = []
-        phases = []
         pr_fields = [('density', 'temperature'), ('density', 'velocity_x'),
                      ('temperature', 'cell_mass'), ('density', 'radius'),
                      ('velocity_magnitude', 'cell_mass')]
-        ph_fields = [('density', 'temperature', 'cell_mass'),
-                     ('density', 'velocity_x', 'cell_mass'),
-                     ('radius', 'temperature', 'velocity_magnitude')]
+        profiles = []
         for reg in regions:
             for x_field, y_field in pr_fields:
                 profiles.append(ProfilePlot(reg, x_field, y_field))
@@ -93,6 +95,26 @@
                                             fractional=True, accumulation=True))
                 p1d = create_profile(reg, x_field, y_field)
                 profiles.append(ProfilePlot.from_profiles(p1d))
+        p1 = create_profile(test_ds.all_data(), 'density', 'temperature')
+        p2 = create_profile(test_ds.all_data(), 'density', 'velocity_x')
+        profiles.append(ProfilePlot.from_profiles(
+            [p1, p2], labels=['temperature', 'velocity']))
+        profiles[0]._repr_html_()
+        for p in profiles:
+            for fname in TEST_FLNMS:
+                assert_fname(p.save(fname)[0])
+
+    def test_phase_plot(self):
+        fields = ('density', 'temperature', 'velocity_x', 'velocity_y',
+                  'velocity_z')
+        units = ('g/cm**3', 'K', 'cm/s', 'cm/s', 'cm/s')
+        test_ds = fake_random_ds(16, fields=fields, units=units)
+        regions = [test_ds.region([0.5]*3, [0.4]*3, [0.6]*3), test_ds.all_data()]
+        phases = []
+        ph_fields = [('density', 'temperature', 'cell_mass'),
+                     ('density', 'velocity_x', 'cell_mass'),
+                     ('radius', 'temperature', 'velocity_magnitude')]
+        for reg in regions:
             for x_field, y_field, z_field in ph_fields:
                 # set n_bins to [16, 16] since matplotlib's postscript
                 # renderer is slow when it has to write a lot of polygons
@@ -113,39 +135,10 @@
         assert_array_almost_equal(xlim, (0.3, 0.8))
         assert_array_almost_equal(ylim, (0.4, 0.6))
         phases.append(pp)
-
-        p1 = create_profile(test_ds.all_data(), 'density', 'temperature')
-        p2 = create_profile(test_ds.all_data(), 'density', 'velocity_x')
-        profiles.append(ProfilePlot.from_profiles(
-            [p1, p2], labels=['temperature', 'velocity']))
-
-        cls.profiles = profiles
-        cls.phases = phases
-        cls.ds = test_ds
-
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp()
-        self.curdir = os.getcwd()
-        os.chdir(self.tmpdir)
-
-    def tearDown(self):
-        os.chdir(self.curdir)
-        shutil.rmtree(self.tmpdir)
-
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_profile_plot(self, fname):
-        for p in self.profiles:
-            assert_fname(p.save(fname)[0])
-
-    @parameterized.expand(param.explicit((fname, )) for fname in TEST_FLNMS)
-    def test_phase_plot(self, fname):
-        for p in self.phases:
-            assert_fname(p.save(fname)[0])
-
-    def test_ipython_repr(self):
-        self.profiles[0]._repr_html_()
-        self.phases[0]._repr_html_()
-
+        phases[0]._repr_html_()
+        for p in phases:
+            for fname in TEST_FLNMS:
+                assert_fname(p.save(fname)[0])
 
 ETC46 = "enzo_tiny_cosmology/DD0046/DD0046"
 

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -48,8 +48,8 @@
             shutil.rmtree(self.tmpdir)
 
     def test_rotation(self):
-        ds = fake_random_ds(64)
-        ds2 = fake_random_ds(64)
+        ds = fake_random_ds(32)
+        ds2 = fake_random_ds(32)
         dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
         dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
 

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/volume_rendering/tests/test_varia.py
--- a/yt/visualization/volume_rendering/tests/test_varia.py
+++ b/yt/visualization/volume_rendering/tests/test_varia.py
@@ -46,6 +46,7 @@
         if self.use_tmpdir:
             os.chdir(self.curdir)
             shutil.rmtree(self.tmpdir)
+        del self.ds
 
     def test_simple_scene_creation(self):
         yt.create_scene(self.ds)
@@ -72,7 +73,7 @@
         im, sc = yt.volume_render(self.ds)
 
         angle = 2 * np.pi
-        frames = 10
+        frames = 4
         for i in range(frames):
             sc.camera.yaw(angle / frames)
             sc.render()

diff -r d156390fc522026f3ce91abd04acb25c79f2e58e -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -81,7 +81,6 @@
 
         im = sc.render()
         im.write_png("composite.png")
-        return im
 
     def test_nonrectangular_add(self):
         rgba1 = np.ones((64, 1, 4))


https://bitbucket.org/yt_analysis/yt/commits/bcf00b412187/
Changeset:   bcf00b412187
User:        ngoldbaum
Date:        2017-05-01 19:20:12+00:00
Summary:     add travis builds for various python versions and OSX
Affected #:  6 files

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 .travis.yml
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,61 @@
+language: python
+sudo: false
+cache:
+  pip: true
+  directories:
+    - $HOME/.ccache  # https://github.com/travis-ci/travis-ci/issues/5853
+
+addons:
+  apt:
+    packages:
+      - libhdf5-serial-dev
+
+matrix:
+  include:
+    - python: 2.7
+    - python: 3.4
+    - python: 3.5
+    - python: 3.6
+    - os: osx
+      osx_image: xcode7.3
+      language: generic  # https://github.com/travis-ci/travis-ci/issues/2312
+      cache:
+        pip: false
+        directories:
+          - $HOME/Library/Caches/pip
+          # `cache` does not support `env`-like `global` so copy-paste from top
+          - $HOME/.ccache  # https://github.com/travis-ci/travis-ci/issues/5853
+
+before_install:
+  - |
+    if [[ $TRAVIS_OS_NAME != 'osx' ]]; then
+      pip install --upgrade virtualenv
+      python -m virtualenv venv
+      source venv/bin/activate
+      export PATH=/usr/lib/ccache:$PATH
+    else
+      brew update
+      brew install ccache hdf5
+      export PATH=/usr/local/opt/ccache/libexec:$PATH
+    fi
+    mkdir -p $HOME/.config/yt
+    echo "[yt]" > $HOME/.config/yt/ytrc
+    echo "suppressStreamLogging = True" >> $HOME/.config/yt/ytrc
+    cat $HOME/.config/yt/ytrc
+
+install:
+  - |
+    # setup environment
+    ccache -s
+    # Upgrade pip and setuptools and wheel to get clean install
+    pip install --upgrade pip
+    pip install --upgrade wheel
+    pip install --upgrade setuptools
+    # Install dependencies
+    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython
+    # install yt
+    pip install -e .
+
+script:
+  - |
+    nosetests -sv yt

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -5,7 +5,8 @@
 import unittest
 from yt.data_objects.image_array import ImageArray
 from yt.testing import \
-    assert_equal
+    assert_equal, \
+    requires_module
 
 
 def setup():
@@ -55,6 +56,7 @@
 
         assert str(new_im.units) == 'km'
 
+    @requires_module('h5py')
     def test_image_array_hdf5(self):
         myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
                   'north_vector': np.array([0., 0., 1.]),

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -1,6 +1,8 @@
 import subprocess
 import yt
 import os
+import sys
+import tempfile
 
 from yt.testing import requires_module
 
@@ -8,17 +10,19 @@
 @requires_module('flake8')
 def test_flake8():
     yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
-    output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+    output_file = os.environ.get("WORKSPACE", None) or tempfile.mkdtemp()
     output_file = os.path.join(output_file, 'flake8.out')
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
     config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir), 
                                                  'setup.cfg')
-    subprocess.call(['flake8', output_string, config_string, yt_dir])
-    
-    with open(output_file) as f:
-        flake8_output = f.readlines()
-    if flake8_output != []:
-        raise AssertionError(
-            "flake8 found style errors:\n\n%s" % "\n".join(flake8_output))
+    subprocess.call([sys.executable, '-m', 'flake8', output_string,
+                     config_string, yt_dir])
+
+    if os.path.exists(output_file):
+        with open(output_file) as f:
+            flake8_output = f.readlines()
+        if flake8_output != []:
+            raise AssertionError(
+                "flake8 found style errors:\n\n%s" % "\n".join(flake8_output))

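A side note on the changed invocation: running flake8 as `sys.executable -m flake8` ties the check to the interpreter that is running the tests, so it finds the flake8 installed in the active virtualenv even when no `flake8` script is on PATH (a common failure mode on Windows and in CI virtualenvs). A minimal sketch of the pattern:

    import subprocess
    import sys

    # Invoking the module through the current interpreter guarantees
    # the same environment as the test process; a bare 'flake8' call
    # resolves through PATH and may hit a different installation.
    subprocess.call([sys.executable, '-m', 'flake8', '--version'])

The new existence check on the output file then covers the case where no report was written at all.
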
diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -39,7 +39,8 @@
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import \
-    fake_random_ds, requires_module, \
+    fake_random_ds, \
+    requires_module, \
     assert_allclose_units
 from yt.funcs import fix_length
 from yt.units.unit_symbols import \
@@ -976,7 +977,8 @@
     assert_isinstance(a[:], YTASubclass)
     assert_isinstance(a[:2], YTASubclass)
     assert_isinstance(YTASubclass(yta), YTASubclass)
-    
+
+@requires_module('h5py')
 def test_h5_io():
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -787,7 +787,7 @@
         >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
         ...              info=myinfo)
         """
-        import h5py
+        from yt.utilities.on_demand_imports import _h5py as h5py
         from yt.extern.six.moves import cPickle as pickle
         if info is None:
             info = {}

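On the `on_demand_imports` change: importing h5py lazily keeps `yt.units.yt_array` importable on systems without h5py, deferring any ImportError to the moment `write_hdf5` is actually called. A simplified sketch of the deferred-import idea (yt's real `_h5py` shim is more elaborate; this is just the shape of the pattern):

    import importlib

    class OnDemandImport(object):
        # Stands in for a module; the real import happens on first
        # attribute access, so a missing optional dependency only
        # fails at the point of use.
        def __init__(self, name):
            self._name = name
            self._module = None

        def __getattr__(self, attr):
            if self._module is None:
                self._module = importlib.import_module(self._name)
            return getattr(self._module, attr)

    h5py = OnDemandImport('h5py')  # no ImportError until e.g. h5py.File
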
diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r bcf00b412187ddf6e3828da8c9caaf8b935eb285 yt/utilities/tests/test_config.py
--- a/yt/utilities/tests/test_config.py
+++ b/yt/utilities/tests/test_config.py
@@ -158,7 +158,8 @@
         
         my_plugin_name = _TEST_PLUGIN
         plugin_file = os.path.join(CONFIG_DIR, my_plugin_name)
-        os.remove(plugin_file)
+        if os.path.exists(plugin_file):
+            os.remove(plugin_file)
 
         old_config_dir = os.path.join(os.path.expanduser('~'), '.yt')
         for base_prefix in ('', CONFIG_DIR, old_config_dir):


https://bitbucket.org/yt_analysis/yt/commits/4cc982dcfaef/
Changeset:   4cc982dcfaef
User:        ngoldbaum
Date:        2017-05-08 21:12:41+00:00
Summary:     Merge pull request #1356 from ngoldbaum/travis

Add travis builds for various python versions and MacOS
Affected #:  6 files

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d .travis.yml
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,61 @@
+language: python
+sudo: false
+cache:
+  pip: true
+  directories:
+    - $HOME/.ccache  # https://github.com/travis-ci/travis-ci/issues/5853
+
+addons:
+  apt:
+    packages:
+      - libhdf5-serial-dev
+
+matrix:
+  include:
+    - python: 2.7
+    - python: 3.4
+    - python: 3.5
+    - python: 3.6
+    - os: osx
+      osx_image: xcode7.3
+      language: generic  # https://github.com/travis-ci/travis-ci/issues/2312
+      cache:
+        pip: false
+        directories:
+          - $HOME/Library/Caches/pip
+          # `cache` does not support `env`-like `global` so copy-paste from top
+          - $HOME/.ccache  # https://github.com/travis-ci/travis-ci/issues/5853
+
+before_install:
+  - |
+    if [[ $TRAVIS_OS_NAME != 'osx' ]]; then
+      pip install --upgrade virtualenv
+      python -m virtualenv venv
+      source venv/bin/activate
+      export PATH=/usr/lib/ccache:$PATH
+    else
+      brew update
+      brew install ccache hdf5
+      export PATH=/usr/local/opt/ccache/libexec:$PATH
+    fi
+    mkdir -p $HOME/.config/yt
+    echo "[yt]" > $HOME/.config/yt/ytrc
+    echo "suppressStreamLogging = True" >> $HOME/.config/yt/ytrc
+    cat $HOME/.config/yt/ytrc
+
+install:
+  - |
+    # setup environment
+    ccache -s
+    # Upgrade pip and setuptools and wheel to get clean install
+    pip install --upgrade pip
+    pip install --upgrade wheel
+    pip install --upgrade setuptools
+    # Install dependencies
+    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython
+    # install yt
+    pip install -e .
+
+script:
+  - |
+    nosetests -sv yt

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -5,7 +5,8 @@
 import unittest
 from yt.data_objects.image_array import ImageArray
 from yt.testing import \
-    assert_equal
+    assert_equal, \
+    requires_module
 
 
 def setup():
@@ -55,6 +56,7 @@
 
         assert str(new_im.units) == 'km'
 
+    @requires_module('h5py')
     def test_image_array_hdf5(self):
         myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
                   'north_vector': np.array([0., 0., 1.]),

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d yt/tests/test_flake8.py
--- a/yt/tests/test_flake8.py
+++ b/yt/tests/test_flake8.py
@@ -1,6 +1,8 @@
 import subprocess
 import yt
 import os
+import sys
+import tempfile
 
 from yt.testing import requires_module
 
@@ -8,17 +10,19 @@
 @requires_module('flake8')
 def test_flake8():
     yt_dir = os.path.dirname(os.path.abspath(yt.__file__))
-    output_file = os.environ.get("WORKSPACE", None) or os.getcwd()
+    output_file = os.environ.get("WORKSPACE", None) or tempfile.mkdtemp()
     output_file = os.path.join(output_file, 'flake8.out')
     if os.path.exists(output_file):
         os.remove(output_file)
     output_string = "--output-file=%s" % output_file
     config_string = "--config=%s" % os.path.join(os.path.dirname(yt_dir), 
                                                  'setup.cfg')
-    subprocess.call(['flake8', output_string, config_string, yt_dir])
-    
-    with open(output_file) as f:
-        flake8_output = f.readlines()
-    if flake8_output != []:
-        raise AssertionError(
-            "flake8 found style errors:\n\n%s" % "\n".join(flake8_output))
+    subprocess.call([sys.executable, '-m', 'flake8', output_string,
+                     config_string, yt_dir])
+
+    if os.path.exists(output_file):
+        with open(output_file) as f:
+            flake8_output = f.readlines()
+        if flake8_output != []:
+            raise AssertionError(
+                "flake8 found style errors:\n\n%s" % "\n".join(flake8_output))

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -39,7 +39,8 @@
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import \
-    fake_random_ds, requires_module, \
+    fake_random_ds, \
+    requires_module, \
     assert_allclose_units
 from yt.funcs import fix_length
 from yt.units.unit_symbols import \
@@ -976,7 +977,8 @@
     assert_isinstance(a[:], YTASubclass)
     assert_isinstance(a[:2], YTASubclass)
     assert_isinstance(YTASubclass(yta), YTASubclass)
-    
+
+@requires_module('h5py')
 def test_h5_io():
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -787,7 +787,7 @@
         >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
         ...              info=myinfo)
         """
-        import h5py
+        from yt.utilities.on_demand_imports import _h5py as h5py
         from yt.extern.six.moves import cPickle as pickle
         if info is None:
             info = {}

diff -r 7b4559b80150a02a038ea0ff8e2a88d1075d0556 -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d yt/utilities/tests/test_config.py
--- a/yt/utilities/tests/test_config.py
+++ b/yt/utilities/tests/test_config.py
@@ -158,7 +158,8 @@
         
         my_plugin_name = _TEST_PLUGIN
         plugin_file = os.path.join(CONFIG_DIR, my_plugin_name)
-        os.remove(plugin_file)
+        if os.path.exists(plugin_file):
+            os.remove(plugin_file)
 
         old_config_dir = os.path.join(os.path.expanduser('~'), '.yt')
         for base_prefix in ('', CONFIG_DIR, old_config_dir):


https://bitbucket.org/yt_analysis/yt/commits/b3cc343ad46f/
Changeset:   b3cc343ad46f
User:        ngoldbaum
Date:        2017-05-09 14:19:03+00:00
Summary:     tweak travis config to not capture logs and to print timing info
Affected #:  1 file

diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r b3cc343ad46fbbceff2b87b1c7805596cac5e08f .travis.yml
--- a/.travis.yml
+++ b/.travis.yml
@@ -52,10 +52,10 @@
     pip install --upgrade wheel
     pip install --upgrade setuptools
     # Install dependencies
-    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython
+    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython nose-timer
     # install yt
     pip install -e .
 
 script:
   - |
-    nosetests -sv yt
+    nosetests --nologcapture --with-timer -sv yt


https://bitbucket.org/yt_analysis/yt/commits/f426fd24822a/
Changeset:   f426fd24822a
User:        MatthewTurk
Date:        2017-05-09 15:38:57+00:00
Summary:     Merge pull request #1381 from ngoldbaum/travis-tweak

tweak travis config to not capture logs and to print timing info
Affected #:  1 file

diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f .travis.yml
--- a/.travis.yml
+++ b/.travis.yml
@@ -52,10 +52,10 @@
     pip install --upgrade wheel
     pip install --upgrade setuptools
     # Install dependencies
-    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython
+    pip install mock numpy scipy cython matplotlib sympy fastcache nose flake8 h5py ipython nose-timer
     # install yt
     pip install -e .
 
 script:
   - |
-    nosetests -sv yt
+    nosetests --nologcapture --with-timer -sv yt


https://bitbucket.org/yt_analysis/yt/commits/ff45b13387eb/
Changeset:   ff45b13387eb
User:        ngoldbaum
Date:        2017-05-08 22:39:32+00:00
Summary:     covering and arbitrary grid do particle deposition in fortran order
Affected #:  4 files

diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r ff45b13387eb2876d3baaa6dfd17ba28ac3d9240 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -741,12 +741,15 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions, kernel_name)
+        # We allocate number of zones, not number of octs. Everything inside
+        # this is fortran ordered because of the ordering in the octree deposit
+        # routines, so we reverse it here to match the convention there
+        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.copy(order="C")
+        # Fortran-ordered, so transpose.
+        return vals.transpose()
 
     def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None,
                      **kwargs):

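For context on the ordering comment above: the octree deposit routines fill their output in Fortran (column-major) order, so the grid now allocates the reversed shape in Fortran order and transposes on the way out. Transposing a Fortran-ordered array is free (it produces a C-contiguous view rather than a copy), which is presumably why the `vals.copy(order="C")` could be dropped. A minimal NumPy sketch with a hypothetical shape:

    import numpy as np

    dims = (8, 4, 2)  # stand-in for ActiveDimensions
    # Allocate the reversed dims in Fortran order, matching the
    # convention of the octree deposit routines...
    buf = np.zeros(tuple(dims)[::-1], dtype="float64", order="F")
    # ...then transpose: for a Fortran array this is a zero-copy
    # view that is C-contiguous with the original dimensions.
    out = buf.transpose()
    assert out.shape == dims
    assert out.flags["C_CONTIGUOUS"]
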
diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r ff45b13387eb2876d3baaa6dfd17ba28ac3d9240 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -351,14 +351,16 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        # We allocate number of zones, not number of octs
-        # Everything inside this is fortran ordered, so we reverse it here.
-        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
+        # We allocate number of zones, not number of octs. Everything inside
+        # this is Fortran ordered because of the ordering in the octree deposit
+        # routines, so we reverse it here to match the convention there
+        op = cls(tuple(self.ActiveDimensions[::-1]), kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
         if vals is None: return
-        return vals.transpose() # Fortran-ordered, so transpose.
+        # Fortran-ordered, so transpose.
+        return vals.transpose()
 
     def select_blocks(self, selector):
         mask = self._get_selector_mask(selector)

diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r ff45b13387eb2876d3baaa6dfd17ba28ac3d9240 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -101,28 +101,27 @@
 
             ds = load_particles(particle_data)
 
-            LE = np.array([0.05, 0.05, 0.05])
-            RE = np.array([0.95, 0.95, 0.95])
-            dims = np.array([ncells, ncells, ncells])
+            for dims in ([ncells]*3, [ncells, ncells/2, ncells/4]):
+                LE = np.array([0.05, 0.05, 0.05])
+                RE = np.array([0.95, 0.95, 0.95])
+                dims = np.array(dims)
 
-            dds = (RE - LE) / dims
-            volume = ds.quan(np.product(dds), 'cm**3')
+                dds = (RE - LE) / dims
+                volume = ds.quan(np.product(dds), 'cm**3')
 
-            obj = ds.arbitrary_grid(LE, RE, dims)
-            deposited_mass = obj["deposit", "all_density"].sum() * volume
+                obj = ds.arbitrary_grid(LE, RE, dims)
+                deposited_mass = obj["deposit", "all_density"].sum() * volume
 
-            assert_equal(deposited_mass, ds.quan(1.0, 'g'))
+                assert_equal(deposited_mass, ds.quan(1.0, 'g'))
 
-            LE = np.array([0.00, 0.00, 0.00])
-            RE = np.array([0.05, 0.05, 0.05])
-            dims = np.array([ncells, ncells, ncells])
+                LE = np.array([0.00, 0.00, 0.00])
+                RE = np.array([0.05, 0.05, 0.05])
 
-            obj = ds.arbitrary_grid(LE, RE, dims)
+                obj = ds.arbitrary_grid(LE, RE, dims)
 
-            deposited_mass = obj["deposit", "all_density"].sum()
+                deposited_mass = obj["deposit", "all_density"].sum()
 
-            assert_equal(deposited_mass, 0)
-
+                assert_equal(deposited_mass, 0)
 
     # Test that we get identical results to the covering grid for unigrid data.
     # Testing AMR data is much harder.

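The rewritten test now loops over a non-cubic grid as well, which is the case that actually exercises the axis reversal (with equal dimensions, a mixed-up axis order is invisible). The conservation check itself is simple arithmetic: summed deposited density times the constant cell volume must return the total particle mass. Schematically, with hypothetical values:

    import numpy as np

    LE, RE = np.array([0.05] * 3), np.array([0.95] * 3)
    dims = np.array([16, 8, 4])        # non-cubic: catches axis mixups
    cell_volume = np.prod((RE - LE) / dims)
    # A uniform density field carrying one gram in total:
    density = np.full(dims, 1.0 / (cell_volume * dims.prod()))
    total_mass = density.sum() * cell_volume
    assert np.isclose(total_mass, 1.0)
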
diff -r 4cc982dcfaefe24705ca011fbd67c28dd32f053d -r ff45b13387eb2876d3baaa6dfd17ba28ac3d9240 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -11,6 +11,7 @@
     assert_array_almost_equal_nulp, \
     assert_array_equal, \
     assert_raises, \
+    assert_allclose_units, \
     requires_file
 from yt.utilities.cosmology import \
     Cosmology
@@ -347,3 +348,13 @@
     # If this is not true this means the result of field inference depends
     # on the order we did field detection, which is random in Python3
     assert_equal(ds._last_freq, (None, None))
+
+ISOGAL = 'IsolatedGalaxy/galaxy0030/galaxy0030'
+
+ at requires_file(ISOGAL)
+def test_deposit_amr():
+    ds = load(ISOGAL)
+    for i, g in enumerate(ds.index.grids):
+        gpm = g['particle_mass'].sum()
+        dpm = g['deposit', 'all_mass'].sum()
+        assert_allclose_units(gpm, dpm)


https://bitbucket.org/yt_analysis/yt/commits/bb69b6d6cd72/
Changeset:   bb69b6d6cd72
User:        ngoldbaum
Date:        2017-05-09 14:15:50+00:00
Summary:     fix bug in determining whether to throw YTBoundsDefinitionError
Affected #:  1 file

diff -r ff45b13387eb2876d3baaa6dfd17ba28ac3d9240 -r bb69b6d6cd7200f7e58d39e88d5346f78f64a76d yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -340,11 +340,11 @@
     cdef np.float64_t[:,:,:,:] field
     cdef public object ofield
     def initialize(self):
-        if not all(_ > 1 for _ in self.nvals[:-1]):
+        if not all(_ > 1 for _ in self.nvals):
             from yt.utilities.exceptions import YTBoundsDefinitionError
             raise YTBoundsDefinitionError(
                 "CIC requires minimum of 2 zones in all spatial dimensions",
-                self.nvals[:-1])
+                self.nvals)
         self.field = append_axes(
             np.zeros(self.nvals, dtype="float64", order='F'), 4)
 

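The bug here: `nvals` previously carried a trailing component axis, so the check skipped its last entry; after the deposition rework `nvals` holds only the spatial zone counts, and all of them must be validated. Cloud-in-cell (CIC) deposition spreads each particle over a 2-cell stencil per axis, hence the minimum of 2 zones. A hedged sketch of the logic with a hypothetical helper (ValueError standing in for YTBoundsDefinitionError):

    def validate_cic_dims(nvals):
        # CIC needs at least 2 zones along every spatial axis.
        if not all(n > 1 for n in nvals):
            raise ValueError(
                "CIC requires minimum of 2 zones in all spatial "
                "dimensions: %s" % (tuple(nvals),))

    validate_cic_dims((16, 16, 16))    # fine
    # validate_cic_dims((16, 16, 1))   # raises
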

https://bitbucket.org/yt_analysis/yt/commits/ea18a2a78232/
Changeset:   ea18a2a78232
User:        ngoldbaum
Date:        2017-05-09 18:35:03+00:00
Summary:     Merge pull request #1380 from ngoldbaum/c-order-deposit

Do covering grid and arbitrary grid particle deposition on F order arrays
Affected #:  5 files

diff -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f -r ea18a2a782327bbcd0a55eb6d60d29566853be86 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -741,12 +741,15 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions, kernel_name)
+        # We allocate number of zones, not number of octs. Everything inside
+        # this is fortran ordered because of the ordering in the octree deposit
+        # routines, so we reverse it here to match the convention there
+        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.copy(order="C")
+        # Fortran-ordered, so transpose.
+        return vals.transpose()
 
     def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None,
                      **kwargs):

diff -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f -r ea18a2a782327bbcd0a55eb6d60d29566853be86 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -351,14 +351,16 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        # We allocate number of zones, not number of octs
-        # Everything inside this is fortran ordered, so we reverse it here.
-        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
+        # We allocate number of zones, not number of octs. Everything inside
+        # this is Fortran ordered because of the ordering in the octree deposit
+        # routines, so we reverse it here to match the convention there
+        op = cls(tuple(self.ActiveDimensions[::-1]), kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
         if vals is None: return
-        return vals.transpose() # Fortran-ordered, so transpose.
+        # Fortran-ordered, so transpose.
+        return vals.transpose()
 
     def select_blocks(self, selector):
         mask = self._get_selector_mask(selector)

diff -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f -r ea18a2a782327bbcd0a55eb6d60d29566853be86 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -101,28 +101,27 @@
 
             ds = load_particles(particle_data)
 
-            LE = np.array([0.05, 0.05, 0.05])
-            RE = np.array([0.95, 0.95, 0.95])
-            dims = np.array([ncells, ncells, ncells])
+            for dims in ([ncells]*3, [ncells, ncells/2, ncells/4]):
+                LE = np.array([0.05, 0.05, 0.05])
+                RE = np.array([0.95, 0.95, 0.95])
+                dims = np.array(dims)
 
-            dds = (RE - LE) / dims
-            volume = ds.quan(np.product(dds), 'cm**3')
+                dds = (RE - LE) / dims
+                volume = ds.quan(np.product(dds), 'cm**3')
 
-            obj = ds.arbitrary_grid(LE, RE, dims)
-            deposited_mass = obj["deposit", "all_density"].sum() * volume
+                obj = ds.arbitrary_grid(LE, RE, dims)
+                deposited_mass = obj["deposit", "all_density"].sum() * volume
 
-            assert_equal(deposited_mass, ds.quan(1.0, 'g'))
+                assert_equal(deposited_mass, ds.quan(1.0, 'g'))
 
-            LE = np.array([0.00, 0.00, 0.00])
-            RE = np.array([0.05, 0.05, 0.05])
-            dims = np.array([ncells, ncells, ncells])
+                LE = np.array([0.00, 0.00, 0.00])
+                RE = np.array([0.05, 0.05, 0.05])
 
-            obj = ds.arbitrary_grid(LE, RE, dims)
+                obj = ds.arbitrary_grid(LE, RE, dims)
 
-            deposited_mass = obj["deposit", "all_density"].sum()
+                deposited_mass = obj["deposit", "all_density"].sum()
 
-            assert_equal(deposited_mass, 0)
-
+                assert_equal(deposited_mass, 0)
 
     # Test that we get identical results to the covering grid for unigrid data.
     # Testing AMR data is much harder.

diff -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f -r ea18a2a782327bbcd0a55eb6d60d29566853be86 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -11,6 +11,7 @@
     assert_array_almost_equal_nulp, \
     assert_array_equal, \
     assert_raises, \
+    assert_allclose_units, \
     requires_file
 from yt.utilities.cosmology import \
     Cosmology
@@ -347,3 +348,13 @@
     # If this is not true this means the result of field inference depends
     # on the order we did field detection, which is random in Python3
     assert_equal(ds._last_freq, (None, None))
+
+ISOGAL = 'IsolatedGalaxy/galaxy0030/galaxy0030'
+
+ at requires_file(ISOGAL)
+def test_deposit_amr():
+    ds = load(ISOGAL)
+    for i, g in enumerate(ds.index.grids):
+        gpm = g['particle_mass'].sum()
+        dpm = g['deposit', 'all_mass'].sum()
+        assert_allclose_units(gpm, dpm)

diff -r f426fd24822ad15d75935f1d7d6b3306dbb64c8f -r ea18a2a782327bbcd0a55eb6d60d29566853be86 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -340,11 +340,11 @@
     cdef np.float64_t[:,:,:,:] field
     cdef public object ofield
     def initialize(self):
-        if not all(_ > 1 for _ in self.nvals[:-1]):
+        if not all(_ > 1 for _ in self.nvals):
             from yt.utilities.exceptions import YTBoundsDefinitionError
             raise YTBoundsDefinitionError(
                 "CIC requires minimum of 2 zones in all spatial dimensions",
-                self.nvals[:-1])
+                self.nvals)
         self.field = append_axes(
             np.zeros(self.nvals, dtype="float64", order='F'), 4)
 


https://bitbucket.org/yt_analysis/yt/commits/de22d7c3658e/
Changeset:   de22d7c3658e
User:        ngoldbaum
Date:        2017-05-10 19:39:47+00:00
Summary:     update appveyor config

closes #1254
Affected #:  5 files

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r de22d7c3658e498c5466d5992819965aed0884aa appveyor.yml
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -4,19 +4,18 @@
 environment:
 
   global:
-      PYTHON: "C:\\Miniconda-x64"
+      PYTHON: "C:\\Miniconda3-x64"
 
   matrix:
-
-      - PYTHON_VERSION: "2.7"
-
-      - PYTHON_VERSION: "3.5"
-
+      - PYTHON_VERSION: "3.6"
 
 platform:
     -x64
 
 install:
+    - "if not exist \"%userprofile%\\.config\\yt\" mkdir %userprofile%\\.config\\yt"
+    - "echo [yt] > %userprofile%\\.config\\yt\\ytrc"
+    - "echo suppressStreamLogging = True >> %userprofile%\\.config\\yt\\ytrc"
     - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
 
     # Install the build and runtime dependencies of the project.
@@ -28,11 +27,11 @@
     - "python --version"
 
     # Install specified version of numpy and dependencies
-    - "conda install -q --yes numpy nose setuptools ipython Cython sympy h5py matplotlib"
-    - "python setup.py develop"
+    - "conda install -q --yes -c conda-forge numpy scipy nose setuptools ipython Cython sympy fastcache h5py matplotlib flake8 "
+    - "pip install -e ."
 
 # Not a .NET project
 build: false
 
 test_script:
-  - "nosetests -e test_all_fields ."
+  - "nosetests --nologcapture -sv yt"

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r de22d7c3658e498c5466d5992819965aed0884aa yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -149,7 +149,7 @@
         return buff
             
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
-        indices = np.argsort(data_source['pdx'])[::-1]
+        indices = np.argsort(data_source['pdx'])[::-1].astype(np.int_)
         buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_off_axis_cartesian(buff,
                               data_source['x'], data_source['y'],

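The `.astype(np.int_)` casts here (and the `np.int64`/`np.int_t` changes below) are the Windows part of this fix: `np.int_` maps to the C `long`, which is 32 bits on 64-bit Windows, while `np.argsort` returns `np.intp` (64 bits on any 64-bit build). Cython buffers typed `np.int_t` therefore reject argsort output on Windows unless it is cast explicitly. A small illustration:

    import numpy as np

    a = np.random.random(10)
    idx = np.argsort(a)[::-1]   # dtype np.intp: 64-bit on 64-bit builds
    # np.int_ is the C long: 64-bit on Linux/macOS, 32-bit on Windows,
    # so the explicit cast makes the dtype match np.int_t buffers
    # on every platform.
    idx = idx.astype(np.int_)
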
diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r de22d7c3658e498c5466d5992819965aed0884aa yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -95,13 +95,13 @@
         indexes into the grid cells which the ray crosses in order.
 
     """
-    cdef int i, I
+    cdef np.int_t i, I
     cdef np.float64_t a, b, bsqrd, twoa
     cdef np.ndarray[np.float64_t, ndim=1] p1cart, p2cart, dpcart, t, s, \
                                           rleft, rright, zleft, zright, \
                                           cleft, cright, thetaleft, thetaright, \
                                           tmleft, tpleft, tmright, tpright, tsect
-    cdef np.ndarray[np.int_t, ndim=1, cast=True] inds, tinds, sinds
+    cdef np.ndarray[np.int64_t, ndim=1, cast=True] inds, tinds, sinds
     cdef np.ndarray[np.float64_t, ndim=2] xyz, rztheta, ptemp, b1, b2, dsect
 
     # set up  points

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r de22d7c3658e498c5466d5992819965aed0884aa yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -441,7 +441,7 @@
     for j in idx:
         r = radii[j]
         r2 = int((r+0.3)*(r+0.3))  #0.3 to get nicer shape
-        ks = np.arange(-r,r+1,dtype=int)
+        ks = np.arange(-r, r+1, dtype=np.int64)
         z0 = zs[j]
         for kx in ks:
             x0 = xs[j]+kx

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r de22d7c3658e498c5466d5992819965aed0884aa yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -851,7 +851,7 @@
         yy0, yy1 = plot._axes.get_ylim()
         nx = plot.image._A.shape[1] // self.factor
         ny = plot.image._A.shape[0] // self.factor
-        indices = np.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1].astype(np.int_)
 
         pixX = np.zeros((ny, nx), dtype="f8")
         pixY = np.zeros((ny, nx), dtype="f8")


https://bitbucket.org/yt_analysis/yt/commits/9921b57c74ed/
Changeset:   9921b57c74ed
User:        MatthewTurk
Date:        2017-05-11 16:48:20+00:00
Summary:     Merge pull request #1383 from ngoldbaum/appveyor

update appveyor config, fix windows test failures
Affected #:  5 files

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r 9921b57c74edb63adf0c6faecfe158b2d84eaab0 appveyor.yml
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -4,19 +4,18 @@
 environment:
 
   global:
-      PYTHON: "C:\\Miniconda-x64"
+      PYTHON: "C:\\Miniconda3-x64"
 
   matrix:
-
-      - PYTHON_VERSION: "2.7"
-
-      - PYTHON_VERSION: "3.5"
-
+      - PYTHON_VERSION: "3.6"
 
 platform:
     -x64
 
 install:
+    - "if not exist \"%userprofile%\\.config\\yt\" mkdir %userprofile%\\.config\\yt"
+    - "echo [yt] > %userprofile%\\.config\\yt\\ytrc"
+    - "echo suppressStreamLogging = True >> %userprofile%\\.config\\yt\\ytrc"
     - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
 
     # Install the build and runtime dependencies of the project.
@@ -28,11 +27,11 @@
     - "python --version"
 
     # Install specified version of numpy and dependencies
-    - "conda install -q --yes numpy nose setuptools ipython Cython sympy h5py matplotlib"
-    - "python setup.py develop"
+    - "conda install -q --yes -c conda-forge numpy scipy nose setuptools ipython Cython sympy fastcache h5py matplotlib flake8 "
+    - "pip install -e ."
 
 # Not a .NET project
 build: false
 
 test_script:
-  - "nosetests -e test_all_fields ."
+  - "nosetests --nologcapture -sv yt"

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r 9921b57c74edb63adf0c6faecfe158b2d84eaab0 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -149,7 +149,7 @@
         return buff
             
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
-        indices = np.argsort(data_source['pdx'])[::-1]
+        indices = np.argsort(data_source['pdx'])[::-1].astype(np.int_)
         buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_off_axis_cartesian(buff,
                               data_source['x'], data_source['y'],

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r 9921b57c74edb63adf0c6faecfe158b2d84eaab0 yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -95,13 +95,13 @@
         indexes into the grid cells which the ray crosses in order.
 
     """
-    cdef int i, I
+    cdef np.int_t i, I
     cdef np.float64_t a, b, bsqrd, twoa
     cdef np.ndarray[np.float64_t, ndim=1] p1cart, p2cart, dpcart, t, s, \
                                           rleft, rright, zleft, zright, \
                                           cleft, cright, thetaleft, thetaright, \
                                           tmleft, tpleft, tmright, tpright, tsect
-    cdef np.ndarray[np.int_t, ndim=1, cast=True] inds, tinds, sinds
+    cdef np.ndarray[np.int64_t, ndim=1, cast=True] inds, tinds, sinds
     cdef np.ndarray[np.float64_t, ndim=2] xyz, rztheta, ptemp, b1, b2, dsect
 
     # set up  points

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r 9921b57c74edb63adf0c6faecfe158b2d84eaab0 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -441,7 +441,7 @@
     for j in idx:
         r = radii[j]
         r2 = int((r+0.3)*(r+0.3))  #0.3 to get nicer shape
-        ks = np.arange(-r,r+1,dtype=int)
+        ks = np.arange(-r, r+1, dtype=np.int64)
         z0 = zs[j]
         for kx in ks:
             x0 = xs[j]+kx

diff -r ea18a2a782327bbcd0a55eb6d60d29566853be86 -r 9921b57c74edb63adf0c6faecfe158b2d84eaab0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -851,7 +851,7 @@
         yy0, yy1 = plot._axes.get_ylim()
         nx = plot.image._A.shape[1] // self.factor
         ny = plot.image._A.shape[0] // self.factor
-        indices = np.argsort(plot.data['dx'])[::-1]
+        indices = np.argsort(plot.data['dx'])[::-1].astype(np.int_)
 
         pixX = np.zeros((ny, nx), dtype="f8")
         pixY = np.zeros((ny, nx), dtype="f8")

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

