[yt-svn] commit/yt: 7 new changesets

commits-noreply at bitbucket.org
Wed Jan 18 07:59:17 PST 2017


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/c44c72c4f08e/
Changeset:   c44c72c4f08e
Branch:      yt
User:        MatthewTurk
Date:        2016-10-14 01:22:39+00:00
Summary:     Enable refine_by to be an array
Affected #:  1 file

diff -r fe99e1de08bab01d351c03cd3d2439f2612dbaf8 -r c44c72c4f08ea37d444f3419bf7953622a607fce yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -679,7 +679,7 @@
             cname = cls.__name__
             if cname.endswith("Base"): cname = cname[:-4]
             self._add_object_class(name, cls)
-        if self.refine_by != 2 and hasattr(self, 'proj') and \
+        if not np.all(self.refine_by == 2) and hasattr(self, 'proj') and \
             hasattr(self, 'overlap_proj'):
             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")

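For reference, the switch to np.all is what lets this check accept either a scalar or a per-axis refine_by. A minimal standalone sketch (NumPy only, example values hypothetical):

    import numpy as np

    # Scalar refine_by: the comparison yields a single bool.
    print(np.all(2 == 2))                    # True  -> no warning, keep the standard projection
    # Per-axis refine_by: np.all reduces the elementwise comparison.
    print(np.all(np.array([2, 2, 2]) == 2))  # True  -> no warning
    print(np.all(np.array([4, 4, 1]) == 2))  # False -> warn and revert to overlap_proj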

https://bitbucket.org/yt_analysis/yt/commits/e74ddc55db8b/
Changeset:   e74ddc55db8b
Branch:      yt
User:        MatthewTurk
Date:        2016-10-14 01:50:42+00:00
Summary:     Make fill_region accept an array for refine_by
Affected #:  3 files

diff -r c44c72c4f08ea37d444f3419bf7953622a607fce -r e74ddc55db8b7b11836c2a01815f3774eeecf503 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -663,13 +663,17 @@
                          for field in fields]
         domain_dims = self.ds.domain_dimensions.astype("int64") \
                     * self.ds.relative_refinement(0, self.level)
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.ndarray(refine_by, dtype="i8")
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             # NOTE: This usage of "refine_by" is actually *okay*, because it's
             # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
-                        domain_dims, self.ds.refine_by)
+                        domain_dims, refine_by)
         for name, v in zip(fields, output_fields):
             fi = self.ds._get_field_info(*name)
             self[name] = self.ds.arr(v, fi.units)
@@ -937,6 +941,12 @@
         if len(fields) == 0: return
         ls = self._initialize_level_state(fields)
         min_level = self._compute_minimum_level()
+        # NOTE: This usage of "refine_by" is actually *okay*, because it's
+        # being used with respect to iref, which is *already* scaled!
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.ndarray(refine_by, dtype="i8")
         for level in range(self.level + 1):
             if level < min_level:
                 self._update_level_state(ls)
@@ -951,11 +961,9 @@
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
-                # NOTE: This usage of "refine_by" is actually *okay*, because it's
-                # being used with respect to iref, which is *already* scaled!
                 tot -= fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
-                            chunk.ires, domain_dims, self.ds.refine_by)
+                            chunk.ires, domain_dims, refine_by)
             if level == 0 and tot != 0:
                 raise RuntimeError
             self._update_level_state(ls)

diff -r c44c72c4f08ea37d444f3419bf7953622a607fce -r e74ddc55db8b7b11836c2a01815f3774eeecf503 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -795,10 +795,10 @@
                 np.ndarray[np.int64_t, ndim=2] ipos,
                 np.ndarray[np.int64_t, ndim=1] ires,
                 np.ndarray[np.int64_t, ndim=1] level_dims,
-                np.int64_t refine_by = 2
+                np.ndarray[np.int64_t, ndim=1] refine_by
                 ):
     cdef int i, n
-    cdef np.int64_t tot = 0, oi, oj, ok, rf
+    cdef np.int64_t tot = 0, oi, oj, ok, rf[3]
     cdef np.int64_t iind[3]
     cdef np.int64_t oind[3]
     cdef np.int64_t dim[3]
@@ -826,15 +826,16 @@
         ofield = output_fields[n]
         ifield = input_fields[n]
         for i in range(ipos.shape[0]):
-            rf = refine_by**(output_level - ires[i])
+            for k in range(3):
+                rf[k] = refine_by[k]**(output_level - ires[i])
             for wi in range(3):
                 if offsets[0][wi] == 0: continue
                 off = (left_index[0] + level_dims[0]*(wi-1))
-                iind[0] = ipos[i, 0] * rf - off
+                iind[0] = ipos[i, 0] * rf[0] - off
                 # rf here is the "refinement factor", or, the number of zones
                 # that this zone could potentially contribute to our filled
                 # grid.
-                for oi in range(rf):
+                for oi in range(rf[0]):
                     # Now we need to apply our offset
                     oind[0] = oi + iind[0]
                     if oind[0] < 0:
@@ -844,8 +845,8 @@
                     for wj in range(3):
                         if offsets[1][wj] == 0: continue
                         off = (left_index[1] + level_dims[1]*(wj-1))
-                        iind[1] = ipos[i, 1] * rf - off
-                        for oj in range(rf):
+                        iind[1] = ipos[i, 1] * rf[1] - off
+                        for oj in range(rf[1]):
                             oind[1] = oj + iind[1]
                             if oind[1] < 0:
                                 continue
@@ -854,8 +855,8 @@
                             for wk in range(3):
                                 if offsets[2][wk] == 0: continue
                                 off = (left_index[2] + level_dims[2]*(wk-1))
-                                iind[2] = ipos[i, 2] * rf - off
-                                for ok in range(rf):
+                                iind[2] = ipos[i, 2] * rf[2] - off
+                                for ok in range(rf[2]):
                                     oind[2] = ok + iind[2]
                                     if oind[2] < 0:
                                         continue

diff -r c44c72c4f08ea37d444f3419bf7953622a607fce -r e74ddc55db8b7b11836c2a01815f3774eeecf503 yt/utilities/lib/tests/test_fill_region.py
--- a/yt/utilities/lib/tests/test_fill_region.py
+++ b/yt/utilities/lib/tests/test_fill_region.py
@@ -25,7 +25,8 @@
         ires = np.zeros(NDIM*NDIM*NDIM, "int64")
         ddims = np.array([NDIM, NDIM, NDIM], dtype="int64") * rf
         fill_region(input_fields, output_fields, level,
-                    left_index, ipos, ires, ddims, 2)
+                    left_index, ipos, ires, ddims,
+                    np.array([2, 2, 2], dtype="i8"))
         for r in range(level + 1):
             for o, i in zip(output_fields, v):
                 assert_equal( o[r::rf,r::rf,r::rf], i)

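Both call sites in construction_data_containers.py use the same scalar-to-array promotion before handing refine_by to fill_region. A standalone sketch of that pattern follows; np.iterable stands in for yt's iterable helper, and np.asarray is assumed for the conversion, since the np.ndarray constructor used in the hunks above interprets its first argument as a shape rather than as data:

    import numpy as np

    def promote_refine_by(refine_by):
        # Return refine_by as a 3-element int64 array, whether it was given
        # as a scalar (e.g. 2) or already per-axis (e.g. [4, 4, 1]).
        if not np.iterable(refine_by):
            refine_by = [refine_by, refine_by, refine_by]
        return np.asarray(refine_by, dtype="int64")

    print(promote_refine_by(2))          # [2 2 2]
    print(promote_refine_by([4, 4, 1]))  # [4 4 1]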

https://bitbucket.org/yt_analysis/yt/commits/2c89dcbc1914/
Changeset:   2c89dcbc1914
Branch:      yt
User:        MatthewTurk
Date:        2016-12-13 21:39:30+00:00
Summary:     Merging with upstream
Affected #:  137 files

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -33,10 +33,13 @@
 karraki at nmsu.edu = karraki at gmail.com
 hckr at eml.cc = astrohckr at gmail.com
 julian3 at illinois.edu = astrohckr at gmail.com
+aj at hckr.eml.cc = astrohckr at gmail.com
 cosmosquark = bthompson2090 at gmail.com
 chris.m.malone at lanl.gov = chris.m.malone at gmail.com
-jnaiman at ucolick.org = jnaiman
-migueld.deval = miguel at archlinux.net
+jnaiman at ucolick.org = jnaiman at cfa.harvard.edu
+jnaiman = jnaiman at cfa.harvard.edu
+migueld.deval = miguel.deval at gmail.com
+miguel at archlinux.net = miguel.deval at gmail.com
 slevy at ncsa.illinois.edu = salevy at illinois.edu
 malzraa at gmail.com = kellerbw at mcmaster.ca
 None = convert-repo
@@ -47,3 +50,10 @@
 Ben Thompson = bthompson2090 at gmail.com
 goldbaum at ucolick.org = ngoldbau at illinois.edu
 ngoldbau at ucsc.edu = ngoldbau at illinois.edu
+NTAuthority at honeypot.fritz.box = anokfireball at poseto.de
+NTAuthority at guest053.fz-rossendorf.de = anokfireball at poseto.de
+NTAuthority at guest692.fz-rossendorf.de = anokfireball at poseto.de
+Fabian Koller = anokfireball at poseto.de
+Rafael Ruggiero = rafael.ruggiero at usp.br
+john.regan at helsinki.fi = john.a.regan at durham.ac.uk
+code at andre-bubel.de = a.huebl at hzdr.de
\ No newline at end of file

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -8,11 +8,14 @@
                 Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                Robert Bradshaw (robertwb at gmail.com)
                 André-Patrick Bubel (code at andre-bubel.de)
+                Corentin Cadiou (corentin.cadiou at iap.fr)
                 Pengfei Chen (madcpf at gmail.com)
                 Yi-Hao Chen (yihaochentw at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
+                Weiguang Cui (weiguang.cui at uwa.edu.au)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
                 Bili Dong (qobilidop at gmail.com)
@@ -28,18 +31,22 @@
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 David Hannasch (David.A.Hannasch at gmail.com)
+                Axel Huebl (a.huebl at hzdr.de)
                 Cameron Hummels (chummels at gmail.com)
                 Anni Järvenpää (anni.jarvenpaa at gmail.com)
                 Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Maximilian Katz (maximilian.katz at stonybrook.edu)
                 Ben W. Keller (kellerbw at mcmaster.ca)
+                Chang-Goo Kim (changgoo at princeton.edu)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Fabian Holler (anokfireball at poseto.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
                 Meagan Lang (langmm.astro at gmail.com)
+                Erwin Tin-Hay Lau (ethlau at gmail.com)
                 Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
@@ -64,6 +71,7 @@
                 Anna Rosen (rosen at ucolick.org)
                 Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
+                Rafael Ruggiero (rafael.ruggiero at usp.br)
                 Hsi-Yu Schive (hyschive at gmail.com)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/helper_scripts/generate_doap.py
--- a/doc/helper_scripts/generate_doap.py
+++ b/doc/helper_scripts/generate_doap.py
@@ -75,47 +75,47 @@
 lastname_sort = lambda a: a.rsplit(None, 1)[-1]
 
 def get_release_tags():
-    c = hglib.open(yt_path)
-    releases = {}
-    for name, rev, node, islocal in c.tags():
-        if name.startswith("yt-"):
-            releases[name] = node
-    rr = []
-    for name, node in sorted(releases.items()):
-        date = c.log(node)[-1][-1]
-        rr.append((date, name[3:]))
+    with hglib.open(yt_path) as c:
+        releases = {}
+        for name, rev, node, islocal in c.tags():
+            if name.startswith("yt-"):
+                releases[name] = node
+        rr = []
+        for name, node in sorted(releases.items()):
+            date = c.log(node)[-1][-1]
+            rr.append((date, name[3:]))
     rr.sort()
     return [(_[1], _[0].strftime("%Y-%M-%d")) for _ in rr]
 
 def developer_names():
     cmd = hglib.util.cmdbuilder("churn", "-c")
-    c = hglib.open(yt_path)
-    emails = set([])
-    for dev in c.rawcommand(cmd).split("\n"):
-        if len(dev.strip()) == 0: continue
-        emails.add(dev.rsplit(None, 2)[0])
-    print("Generating real names for {0} emails".format(len(emails)))
-    names = set([])
-    for email in sorted(emails):
-        if email in name_ignores:
-            continue
-        if email in name_mappings:
-            names.add(name_mappings[email])
-            continue
-        cset = c.log(revrange="last(author('%s'))" % email)
-        if len(cset) == 0:
-            print("Error finding {0}".format(email))
-            realname = email
-        else:
-            realname, addr = parseaddr(cset[0][4])
-        if realname == '':
-            realname = email
-        if realname in name_mappings:
-            names.add(name_mappings[realname])
-            continue
-        realname = realname.decode('utf-8')
-        realname = realname.encode('ascii', 'xmlcharrefreplace')
-        names.add(realname)
+    with hglib.open(yt_path) as c:
+        emails = set([])
+        for dev in c.rawcommand(cmd).split("\n"):
+            if len(dev.strip()) == 0: continue
+            emails.add(dev.rsplit(None, 2)[0])
+        print("Generating real names for {0} emails".format(len(emails)))
+        names = set([])
+        for email in sorted(emails):
+            if email in name_ignores:
+                continue
+            if email in name_mappings:
+                names.add(name_mappings[email])
+                continue
+            cset = c.log(revrange="last(author('%s'))" % email)
+            if len(cset) == 0:
+                print("Error finding {0}".format(email))
+                realname = email
+            else:
+                realname, addr = parseaddr(cset[0][4])
+            if realname == '':
+                realname = email
+            if realname in name_mappings:
+                names.add(name_mappings[realname])
+                continue
+            realname = realname.decode('utf-8')
+            realname = realname.encode('ascii', 'xmlcharrefreplace')
+            names.add(realname)
     #with open("devs.txt", "w") as f:
     #    for name in sorted(names, key=lastname_sort):
     #        f.write("%s\n" % name)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1505,7 +1505,8 @@
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg -R ${YT_DIR} up -C ${BRANCH}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -64,6 +64,24 @@
 You can use this to easily explore available fields, particularly through
 tab-completion in Jupyter/IPython.
 
+It's also possible to iterate over the list of fields associated with each
+field type. For example, to print all of the ``'gas'`` fields, one might do:
+
+.. code-block:: python
+
+   for field in ds.fields.gas:
+       print(field)
+
+You can also check if a given field is associated with a field type using
+standard python syntax:
+
+.. code-block:: python
+
+   # these examples evaluate to True for a dataset that has ('gas', 'density')
+   'density' in ds.fields.gas
+   ('gas', 'density') in ds.fields.gas
+   ds.fields.gas.density in ds.fields.gas
+
 For a more programmatic method of accessing fields, you can utilize the
 ``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
 information about fields.  The full list of fields available for a dataset can

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -64,6 +64,18 @@
        print("(%f,  %f,  %f)    %f" %
              (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i]))
 
+Data objects can also be cloned; for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
+   sp_copy = sp.clone()
+
+This can be useful for when manually chunking data or exploring different field
+parameters.
+
 .. _quickly-selecting-data:
 
 Slicing Syntax for Selecting Data

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -32,10 +32,10 @@
 
 Unit tests are tests that operate on some small set of machinery, and verify
 that the machinery works.  yt uses the `Nose
-<http://nose.readthedocs.org/en/latest/>`_ framework for running unit tests.
-In practice, what this means is that we write scripts that ``yield``
-assertions, and Nose identifies those scripts, runs them, and verifies that the
-assertions are true.
+<http://nose.readthedocs.org/en/latest/>`_ framework for running unit tests.  In
+practice, what this means is that we write scripts that assert statements, and
+Nose identifies those scripts, runs them, and verifies that the assertions are
+true and the code runs without crashing.
 
 How to Run the Unit Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -104,8 +104,9 @@
    functionality and should also verify that the results are correct using
    assert statements or functions.  
 #. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
-   ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
-   captured by nose as a test that asserts that 1.0 is equal to 1.0.
+   ``argument_two``, etc.  For example ``yield my_test, 'banana', 2.0`` would be
+   captured by nose and the ``my_test`` function will be run with the provided
+   arguments.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1055,8 +1055,8 @@
    import yt
    ds = yt.load("InteractingJets/jet_000002")
 
-Currently GAMER does not assume any unit for non-cosmological simulations. To specify the units for yt,
-you need to supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality:
+For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions for
+length, time, and mass to ``load`` using the ``units_override`` functionality:
 
 .. code-block:: python
 
@@ -1698,7 +1698,9 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
-default units.  The parameters recognized are of this form:
+default units.  For example do the following, to load a Tipsy dataset whose
+path is stored in the variable ``my_filename`` with specified cosmology
+parameters:
 
 .. code-block:: python
 
@@ -1707,14 +1709,21 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-If you wish to set the default units directly, you can do so by using the
+   ds = yt.load(my_filename,
+                cosmology_parameters=cosmology_parameters)
+
+If you wish to set the unit system directly, you can do so by using the
 ``unit_base`` keyword in the load statement.
 
  .. code-block:: python
 
     import yt
+
     ds = yt.load(filename, unit_base={'length', (1.0, 'Mpc')})
 
+See the documentation for the
+:class:`~yt.frontends.tipsy.data_structures.TipsyDataset` class for more
+information.
 
 Loading Cosmological Simulations
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -95,22 +95,17 @@
 Running the Install Script
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To get the installation script for the ``stable`` branch of the code,
-download it using the following command:
+You can download the installation script with the following command:
 
 .. code-block:: bash
 
-  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
-
-If you wish to install a different version of yt (see :ref:`branches-of-yt`),
-replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
-the path above to get the correct install script.
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 By default, the bash install script will create a python environment based on
 the `miniconda python distrubtion <http://conda.pydata.org/miniconda.html>`_,

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -95,6 +95,10 @@
   IPython notebook created by ``yt notebook``.  Note that this should be an
   sha512 hash, not a plaintext password.  Starting ``yt notebook`` with no
   setting will provide instructions for setting this.
+* ``requires_ds_strict`` (default: ``'True'``): If true, answer tests wrapped
+  with :func:`~yt.utilities.answer_testing.framework.requires_ds` will raise
+  :class:`~yt.utilities.exceptions.YTOutputNotIdentified` rather than consuming
+  it if required dataset is not present.
 * ``serialize`` (default: ``'False'``): If true, perform automatic
   :ref:`object serialization <object-serialization>`
 * ``sketchfab_api_key`` (default: empty): API key for https://sketchfab.com/ for

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/visualizing/FITSImageData.ipynb
--- a/doc/source/visualizing/FITSImageData.ipynb
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -15,8 +15,7 @@
    },
    "outputs": [],
    "source": [
-    "import yt\n",
-    "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+    "import yt"
    ]
   },
   {
@@ -27,9 +26,9 @@
    },
    "outputs": [],
    "source": [
-    "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
-    "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
-    "                                                               \"time_unit\":(1.0,\"Myr\")})"
+    "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override={\"length_unit\":(1.0,\"Mpc\"),\n",
+    "                                                                   \"mass_unit\":(1.0e14,\"Msun\"),\n",
+    "                                                                   \"time_unit\":(1.0,\"Myr\")})"
    ]
   },
   {
@@ -73,7 +72,7 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+    "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
    ]
   },
   {
@@ -236,7 +235,7 @@
    "source": [
     "slc3 = ds.slice(0, 0.0)\n",
     "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
-    "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+    "fid_frb = yt.FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
    ]
   },
   {
@@ -255,7 +254,7 @@
    "outputs": [],
    "source": [
     "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
-    "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+    "fid_cvg = yt.FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
    ]
   },
   {
@@ -280,7 +279,7 @@
    },
    "outputs": [],
    "source": [
-    "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+    "fid = yt.FITSImageData.from_file(\"sloshing.fits\")\n",
     "fid.info()"
    ]
   },
@@ -299,8 +298,8 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
-    "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+    "prj_fits2 = yt.FITSProjection(ds, \"z\", [\"density\"])\n",
+    "prj_fits3 = yt.FITSImageData.from_images([prj_fits, prj_fits2])\n",
     "prj_fits3.info()"
    ]
   },
@@ -348,7 +347,27 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "So far, the FITS images we have shown have linear spatial coordinates. One may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
+    "So far, the FITS images we have shown have linear spatial coordinates. We can see this by looking at the header for one of the fields, and examining the `CTYPE1` and `CTYPE2` keywords:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits[\"temperature\"].header"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `WCSNAME` keyword is set to `\"yt\"` by default. \n",
+    "\n",
+    "However, one may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
    ]
   },
   {
@@ -368,7 +387,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "By the default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
+    "By default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
    ]
   },
   {
@@ -386,6 +405,49 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "and now the `WCSNAME` has been set to `\"celestial\"`. If you don't want to override the default WCS but to add another one, then you can make the call to `create_sky_wcs` and set `replace_old_wcs=False`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits3.create_sky_wcs(sky_center, sky_scale, ctype=[\"RA---TAN\",\"DEC--TAN\"], replace_old_wcs=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now can see that there are two WCSes in the header, with the celestial WCS keywords having the \"A\" designation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits3[\"temperature\"].header"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Any further WCSes that are added will have \"B\", \"C\", etc."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
    ]
   },
@@ -415,22 +477,11 @@
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python [default]",
    "language": "python",
    "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.1"
   }
  },
  "nbformat": 4,

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -50,6 +50,25 @@
 colorblind/printer/grayscale-friendly plots. For more information, visit
 `http://colorbrewer2.org <http://colorbrewer2.org>`_.
 
+.. _cmocean-cmaps:
+
+Colormaps from cmocean
+~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to ``palettable``, yt will also import colormaps defined in the
+`cmocean <http://matplotlib.org/cmocean>`_ package. These colormaps are
+`perceptually uniform <http://bids.github.io/colormap/>`_ and were originally
+designed for oceanography applications, but can be used for any kind of plots.
+
+Since ``cmocean`` is not installed as a dependency of yt by default, it must be
+installed separately to access the ``cmocean`` colormaps with yt. The easiest
+way to install ``cmocean`` is via ``pip``: ``pip install cmocean``.  To access
+the colormaps in yt, simply specify the name of the ``cmocean`` colormap in any
+context where you would specify a colormap. One caveat is the ``cmocean``
+colormap ``algae``. Since yt already defines a colormap named ``algae``, the
+``cmocean`` version of ``algae`` must be specified with the name
+``algae_cmocean``.
+
 .. _custom-colormaps:
 
 Making and Viewing Custom Colormaps

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -522,6 +522,33 @@
 The same result could have been accomplished by explicitly setting the ``width``
 to ``(.01, 'Mpc')``.
 
+Set image units
+~~~~~~~~~~~~~~~
+
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_axes_unit` allows
+the customization of the units used for the image and colorbar.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'))
+   slc.set_unit('density', 'Msun/pc**3')
+   slc.save()
+
+If the unit you would like to convert to needs an equivalency, this can be
+specified via the ``equivalency`` keyword argument of ``set_unit``. For
+example, let's make a plot of the temperature field, but present it using
+an energy unit instead of a temperature unit:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'temperature', width=(10,'kpc'))
+   slc.set_unit('temperature', 'keV', equivalency='thermal')
+   slc.save()
+
 Set the plot center
 ~~~~~~~~~~~~~~~~~~~
 
@@ -643,6 +670,21 @@
    slc.set_log('x-velocity', True, linthresh=1.e1)
    slc.save()
 
+The :meth:`~yt.visualization.plot_container.ImagePlotContainer.set_background_color`
+function accepts a field name and a color (optional). If color is given, the function
+will set the plot's background color to that. If not, it will set it to the bottom
+value of the color map.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(1.5, 'Mpc'))
+   slc.set_background_color('density')
+   slc.save('bottom_colormap_background')
+   slc.set_background_color('density', color='black')
+   slc.save('black_background')
+
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.
 

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,6 +13,6 @@
 #      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,24 +32,6 @@
 except pkg_resources.DistributionNotFound:
     pass  # yay!
 
-MAPSERVER_FILES = []
-MAPSERVER_DIRS = [
-    "",
-    "leaflet",
-    "leaflet/images"
-]
-
-for subdir in MAPSERVER_DIRS:
-    dir_name = os.path.join("yt", "visualization", "mapserver", "html", subdir)
-    files = []
-    for ext in ["js", "html", "css", "png", "ico", "gif"]:
-        files += glob.glob("%s/*.%s" % (dir_name, ext))
-    MAPSERVER_FILES.append((dir_name, files))
-
-SHADERS_DIR = os.path.join("yt", "visualization", "volume_rendering", "shaders")
-SHADERS_FILES = glob.glob(os.path.join(SHADERS_DIR, "*.vertexshader")) + \
-    glob.glob(os.path.join(SHADERS_DIR, "*.fragmentshader"))
-
 VERSION = "3.4.dev0"
 
 if os.path.exists('MANIFEST'):
@@ -198,7 +180,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses", "distance_queue"
+    "amr_kdtools", "lenses", "distance_queue", "allocation_container"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(
@@ -316,6 +298,15 @@
                 fobj.write("hg_version = '%s'\n" % changeset)
         _build_py.run(self)
 
+    def get_outputs(self):
+        # http://bitbucket.org/yt_analysis/yt/issues/1296
+        outputs = _build_py.get_outputs(self)
+        outputs.append(
+            os.path.join(self.build_lib, 'yt', '__hg_version__.py')
+        )
+        return outputs
+
+
 class build_ext(_build_ext):
     # subclass setuptools extension builder to avoid importing cython and numpy
     # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
@@ -372,7 +363,7 @@
     ]
     },
     packages=find_packages(),
-    package_data = {'':['*.pxd']},
+    include_package_data = True,
     setup_requires=[
         'numpy',
         'cython>=0.24',
@@ -395,7 +386,6 @@
     license="BSD",
     zip_safe=False,
     scripts=["scripts/iyt"],
-    data_files=MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)],
     ext_modules=cython_extensions + extensions,
 )
 

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -45,7 +45,7 @@
         
         if exit_code != 0:
             print("Compilation of OpenMP test code failed with the error: ")
-            print(err)
+            print(err.decode('utf8'))
             print("Disabling OpenMP support. ")
 
         # Clean up

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -6,6 +6,8 @@
 from yt.extern.six import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
+import numpy
+numpy.set_printoptions(threshold=5, edgeitems=1, precision=4)
 
 class NoseWorker(multiprocessing.Process):
 
@@ -67,7 +69,7 @@
                       if DROP_TAG not in line])
     tests = yaml.load(data)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v',
+    base_argv = ['--local-dir=%s' % answers_dir,
                  '--with-answer-testing', '--answer-big-data', '--local']
     args = []
 

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -1,45 +1,45 @@
 answer_tests:
-  local_artio_000:
+  local_artio_001:
     - yt/frontends/artio/tests/test_outputs.py
 
-  local_athena_001:
+  local_athena_002:
     - yt/frontends/athena
 
-  local_chombo_000:
+  local_chombo_002:
     - yt/frontends/chombo/tests/test_outputs.py
 
-  local_enzo_001:
+  local_enzo_003:
     - yt/frontends/enzo
 
-  local_fits_000:
+  local_fits_001:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_002:
+  local_flash_004:
     - yt/frontends/flash/tests/test_outputs.py
 
-  local_gadget_000:
+  local_gadget_001:
     - yt/frontends/gadget/tests/test_outputs.py
 
-  local_gamer_001:
+  local_gamer_002:
     - yt/frontends/gamer/tests/test_outputs.py
 
-  local_gdf_000:
+  local_gdf_001:
     - yt/frontends/gdf/tests/test_outputs.py
 
-  local_gizmo_001:
+  local_gizmo_002:
     - yt/frontends/gizmo/tests/test_outputs.py
 
-  local_halos_000:
+  local_halos_001:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]
     - yt/frontends/owls_subfind/tests/test_outputs.py
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
 
-  local_owls_000:
+  local_owls_001:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_008:
+  local_pw_012:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -47,10 +47,10 @@
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
     - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
 
-  local_tipsy_001:
+  local_tipsy_002:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_005:
+  local_varia_007:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -59,13 +59,13 @@
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
-  local_orion_000:
+  local_orion_001:
     - yt/frontends/boxlib/tests/test_orion.py
 
-  local_ramses_000:
+  local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py
 
-  local_ytdata_000:
+  local_ytdata_002:
     - yt/frontends/ytdata
 
   local_absorption_spectrum_005:
@@ -81,8 +81,6 @@
 
 other_tests:
   unittests:
-     - '-v'
      - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
-     - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -156,7 +156,9 @@
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, add_cmap, make_colormap, \
     ProfilePlot, PhasePlot, ParticlePhasePlot, \
-    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
+    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot, \
+    FITSImageData, FITSSlice, FITSProjection, FITSOffAxisSlice, \
+    FITSOffAxisProjection
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -390,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density < 0).any():
+                mylog.warn("Setting negative densities for field %s to 0! Bad!" % line['field_name'])
+                np.clip(column_density, 0, np.inf, out=column_density)
             if (column_density == 0).all():
                 mylog.info("Not adding line %s: insufficient column density" % line['label'])
                 continue

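The guard added to absorption_spectrum.py zeroes out negative column densities in place before the all-zero check. A small illustration of the np.clip(..., out=...) idiom with made-up values:

    import numpy as np

    column_density = np.array([1.0e14, -3.0e12, 0.0, 2.0e15])
    # Clip in place: negative (unphysical) entries become 0.0 and no new array is allocated.
    np.clip(column_density, 0, np.inf, out=column_density)
    print(column_density)   # the -3.0e12 entry is now 0.0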
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -669,7 +669,29 @@
                 ds[attr] = getattr(self.cosmology, attr)
             ds["current_time"] = \
               self.cosmology.t_from_z(ds["current_redshift"])
+            if isinstance(ds["hubble_constant"], YTArray):
+                ds["hubble_constant"] = \
+                  ds["hubble_constant"].to("100*km/(Mpc*s)").d
         extra_attrs = {"data_type": "yt_light_ray"}
+
+        # save the light ray solution
+        if len(self.light_ray_solution) > 0:
+            # Convert everything to base unit system now to avoid
+            # problems with different units for each ds.
+            for s in self.light_ray_solution:
+                for f in s:
+                    if isinstance(s[f], YTArray):
+                        s[f].convert_to_base()
+            for key in self.light_ray_solution[0]:
+                if key in ["next", "previous", "index"]:
+                    continue
+                lrsa = [sol[key] for sol in self.light_ray_solution]
+                if isinstance(lrsa[-1], YTArray):
+                    to_arr = YTArray
+                else:
+                    to_arr = np.array
+                extra_attrs["light_ray_solution_%s" % key] = to_arr(lrsa)
+
         field_types = dict([(field, "grid") for field in data.keys()])
 
         # Only return LightRay elements with non-zero density

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -12,7 +12,10 @@
 
 import numpy as np
 
+from yt.convenience import \
+    load
 from yt.testing import \
+    assert_array_equal, \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
 import os
@@ -23,6 +26,19 @@
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 
+def compare_light_ray_solutions(lr1, lr2):
+    assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)
+    if len(lr1.light_ray_solution) == 0:
+        return
+    for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):
+        for field in s1:
+            if field in ["next", "previous"]:
+                continue
+            if isinstance(s1[field], np.ndarray):
+                assert_array_equal(s1[field], s2[field])
+            else:
+                assert s1[field] == s2[field]
+
 @requires_file(COSMO_PLUS)
 def test_light_ray_cosmo():
     """
@@ -39,6 +55,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -62,6 +81,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -82,6 +104,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -105,6 +130,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -130,6 +158,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -99,7 +99,7 @@
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
             dd = ds.all_data()
-            newtags = dd[idx_field].ndarray_view().astype("int64")
+            newtags = dd[idx_field].d.astype("int64")
             mask = np.in1d(newtags, indices, assume_unique=True)
             sort = np.argsort(newtags[mask])
             array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
@@ -197,7 +197,6 @@
 
         Examples
         ________
-        >>> from yt.mods import *
         >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.add_fields(["particle_mass", "particle_gpot"])
         """
@@ -247,15 +246,15 @@
                 dd = ds.all_data()
                 for field in new_particle_fields:
                     # This is easy... just get the particle fields
-                    pfield[field] = dd[fds[field]].ndarray_view()[mask][sort]
+                    pfield[field] = dd[fds[field]].d[mask][sort]
 
             if grid_fields:
                 # This is hard... must loop over grids
                 for field in grid_fields:
-                    pfield[field] = np.zeros((self.num_indices))
-                x = self["particle_position_x"][:,step].ndarray_view()
-                y = self["particle_position_y"][:,step].ndarray_view()
-                z = self["particle_position_z"][:,step].ndarray_view()
+                    pfield[field] = np.zeros(self.num_indices)
+                x = self["particle_position_x"][:,step].d
+                y = self["particle_position_y"][:,step].d
+                z = self["particle_position_z"][:,step].d
                 particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
 
                 # This will fail for non-grid index objects
@@ -375,10 +374,10 @@
         >>> trajs.write_out_h5("orbit_trajectories")                
         """
         fid = h5py.File(filename, "w")
-        fields = [field for field in sorted(self.field_data.keys())]
         fid.create_dataset("particle_indices", dtype=np.int64,
                            data=self.indices)
-        fid.create_dataset("particle_time", data=self.times)
+        fid.close()
+        self.times.write_hdf5(filename, dataset_name="particle_times")
+        fields = [field for field in sorted(self.field_data.keys())]
         for field in fields:
-            fid.create_dataset("%s" % field, data=self[field])
-        fid.close()
+            self[field].write_hdf5(filename, dataset_name="%s" % field)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -10,6 +10,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.funcs import issue_deprecation_warning
+
+issue_deprecation_warning("The photon_simulator module is deprecated. Please use pyXSIM "
+                          "(http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.")
+
 from .photon_models import \
      PhotonModel, \
      ThermalPhotonModel

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -31,7 +31,7 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import assert_same_wcs
+from yt.visualization.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     communication_system, parallel_root_only, get_mpi_type, \
     parallel_capable

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -13,7 +13,7 @@
 import numpy as np
 from yt.utilities.on_demand_imports import _astropy
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageData, sanitize_fits_unit
+from yt.visualization.fits_image import FITSImageData, sanitize_fits_unit
 from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
 from yt.funcs import get_pbar
 from yt.utilities.physical_constants import clight, mh

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -375,7 +375,7 @@
         >>> sky_center = (30., 45., "deg")
         >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        from yt.utilities.fits_image import FITSImageData
+        from yt.visualization.fits_image import FITSImageData
 
         dx = self.dx.in_units("kpc")
         dy = dx

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -48,6 +48,7 @@
     reconstruct_index = 'False',
     test_storage_dir = '/does/not/exist',
     test_data_dir = '/does/not/exist',
+    requires_ds_strict = 'False',
     enzo_db = '',
     hub_url = 'https://girder.hub.yt/api/v1',
     hub_api_key = '',
@@ -117,12 +118,17 @@
     with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
         cp.write(new_cfg)
 
-class YTConfigParser(configparser.ConfigParser):
+class YTConfigParser(configparser.ConfigParser, object):
     def __setitem__(self, key, val):
         self.set(key[0], key[1], val)
+
     def __getitem__(self, key):
         self.get(key[0], key[1])
 
+    def get(self, section, option, *args, **kwargs):
+        val = super(YTConfigParser, self).get(section, option, *args, **kwargs)
+        return os.path.expanduser(os.path.expandvars(val))
+
 ytcfg = YTConfigParser(ytcfg_defaults)
 ytcfg.read([_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE, 'yt.cfg'])
 if not ytcfg.has_section("yt"):

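The new YTConfigParser.get override expands environment variables and "~" in every value it returns. A standalone sketch of the same idea, using a hypothetical class name and option value:

    import os
    try:
        import configparser                      # Python 3
    except ImportError:
        import ConfigParser as configparser      # Python 2

    class ExpandingConfigParser(configparser.ConfigParser, object):
        def get(self, section, option, *args, **kwargs):
            val = super(ExpandingConfigParser, self).get(section, option, *args, **kwargs)
            # Expand $VARS and ~ so paths like "$HOME/yt_test_data" resolve.
            return os.path.expanduser(os.path.expandvars(val))

    cp = ExpandingConfigParser()
    cp.add_section("yt")
    cp.set("yt", "test_data_dir", "$HOME/yt_test_data")
    print(cp.get("yt", "test_data_dir"))  # e.g. /home/user/yt_test_data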
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -411,9 +411,11 @@
                 path_length_unit = self.ds.field_info[path_element_name].units
                 path_length_unit = Unit(path_length_unit,
                                         registry=self.ds.unit_registry)
-                # Only convert to CGS for path elements that aren't angles
+                # Only convert to appropriate unit system for path
+                # elements that aren't angles
                 if not path_length_unit.is_dimensionless:
-                    path_length_unit = path_length_unit.get_cgs_equivalent()
+                    path_length_unit = path_length_unit.get_base_equivalent(
+                        unit_system=self.ds.unit_system)
             if self.weight_field is None:
                 self._projected_units[field] = field_unit*path_length_unit
             else:

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -661,6 +661,8 @@
         if axis is None:
             mv, pos0, pos1, pos2 = self.quantities.max_location(field)
             return pos0, pos1, pos2
+        if isinstance(axis, string_types):
+            axis = [axis]
         rv = self.quantities.sample_at_max_field_values(field, axis)
         if len(rv) == 2:
             return rv[1]
@@ -1038,6 +1040,35 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
+    def clone(self):
+        r"""Clone a data object.
+
+        This will make a duplicate of a data object; note that the
+        `field_parameters` may not necessarily be deeply-copied.  If you modify
+        the field parameters in-place, it may or may not be shared between the
+        objects, depending on the type of object that that particular field
+        parameter is.
+
+        Notes
+        -----
+        One use case for this is to have multiple identical data objects that
+        are being chunked over in different orders.
+
+        Examples
+        --------
+
+        >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> sp = ds.sphere("c", 0.1)
+        >>> sp_clone = sp.clone()
+        >>> sp["density"]
+        >>> print sp.field_data.keys()
+        [("gas", "density")]
+        >>> print sp_clone.field_data.keys()
+        []
+        """
+        args = self.__reduce__()
+        return args[0](self.ds, *args[1][1:])[1]
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
@@ -1187,7 +1218,16 @@
         # This is an iterator that will yield the necessary chunks.
         self.get_data() # Ensure we have built ourselves
         if fields is None: fields = []
-        for chunk in self.index._chunk(self, chunking_style, **kwargs):
+        # chunk_ind can be supplied in the keyword arguments.  If it's a
+        # scalar, that'll be the only chunk that gets returned; if it's a list,
+        # those are the ones that will be.
+        chunk_ind = kwargs.pop("chunk_ind", None)
+        if chunk_ind is not None:
+            chunk_ind = ensure_list(chunk_ind)
+        for ci, chunk in enumerate(self.index._chunk(self, chunking_style,
+                                   **kwargs)):
+            if chunk_ind is not None and ci not in chunk_ind:
+                continue
             with self._chunked_read(chunk):
                 self.get_data(fields)
                 # NOTE: we yield before releasing the context
@@ -1976,6 +2016,9 @@
     return narg
 
 def _get_ds_by_hash(hash):
+    from yt.data_objects.static_output import Dataset
+    if isinstance(hash, Dataset):
+        return hash
     from yt.data_objects.static_output import _cached_datasets
     for ds in _cached_datasets.values():
         if ds._hash() == hash: return ds

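The clone() method and the new chunk_ind keyword to chunks() added above can be exercised together; a minimal usage sketch, assuming the IsolatedGalaxy sample dataset named in the docstring:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sp = ds.sphere("c", 0.1)
    sp_clone = sp.clone()        # same selection, empty field cache

    # Visit only the first and third chunks; chunk_ind may be a scalar
    # or a list, per the comment in the diff.
    for chunk in sp_clone.chunks([("gas", "density")], "io", chunk_ind=[0, 2]):
        chunk[("gas", "density")]
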
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -184,8 +184,8 @@
         if dlevel != 1:
             rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = np.maximum(0, cgi / rf - gi)
-        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi // rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) // rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],

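For context on the / to // change (here and in the boxlib and octree hunks below): on Python 3, dividing an integer index array with / produces floats, which break the integer slicing used to build the child mask, while // keeps the indices integral. A quick illustration:

    import numpy as np

    cgi = np.array([10, 12, 14], dtype="int64")
    print(cgi / 4)     # [ 2.5  3.   3.5]  true division yields floats
    print(cgi // 4)    # [2 3 3]           floor division keeps int64 indices
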
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -93,7 +93,7 @@
     def _reshape_vals(self, arr):
         nz = self.nz
         if len(arr.shape) <= 2:
-            n_oct = arr.shape[0] / (nz**3)
+            n_oct = arr.shape[0] // (nz**3)
         else:
             n_oct = max(arr.shape)
         if arr.size == nz*nz*nz*n_oct:

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -239,10 +239,10 @@
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            units = chunk.ds.field_info[field].units
+            units = chunk.ds.field_info[field].output_units
             arr[:,i] = chunk[field][filter].in_units(units)
         if self.weight_field is not None:
-            units = chunk.ds.field_info[self.weight_field].units
+            units = chunk.ds.field_info[self.weight_field].output_units
             weight_data = chunk[self.weight_field].in_units(units)
         else:
             weight_data = np.ones(filter.size, dtype="float64")
@@ -276,7 +276,13 @@
 
     def _get_bins(self, mi, ma, n, take_log):
         if take_log:
-            return np.logspace(np.log10(mi), np.log10(ma), n+1)
+            ret = np.logspace(np.log10(mi), np.log10(ma), n+1)
+            # at this point ret[0] and ret[-1] are not exactly equal to
+            # mi and ma due to round-off error. Let's force them to be
+            # mi and ma exactly to avoid incorrectly discarding cells near
+            # the edges. See Issue #1300.
+            ret[0], ret[-1] = mi, ma
+            return ret
         else:
             return np.linspace(mi, ma, n+1)
 
@@ -1010,6 +1016,11 @@
     if extrema is None:
         ex = [data_source.quantities["Extrema"](f, non_zero=l)
               for f, l in zip(bin_fields, logs)]
+        # pad extrema by epsilon so cells at bin edges are not excluded
+        for i, (mi, ma) in enumerate(ex):
+            mi = mi - np.spacing(mi)
+            ma = ma + np.spacing(ma)
+            ex[i][0], ex[i][1] = mi, ma
     else:
         ex = []
         for bin_field in bin_fields:

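The two profile fixes above attack the same symptom from both ends: np.logspace can return endpoints that differ from the requested min/max by one ULP, and extrema computed from the data can land exactly on a bin edge. A small sketch of the numbers involved (np.spacing gives the ULP used for the padding):

    import numpy as np

    mi, ma = 1.0e-3, 7.0
    edges = np.logspace(np.log10(mi), np.log10(ma), 9)
    print(edges[0] == mi, edges[-1] == ma)   # may be (False, False) from round-off
    edges[0], edges[-1] = mi, ma             # force exact endpoints, as in _get_bins

    # Padding extrema by one ULP so boundary cells are not discarded:
    mi_padded = mi - np.spacing(mi)
    ma_padded = ma + np.spacing(ma)
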
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -106,8 +106,32 @@
             return self.__getattribute__(attr)
         return fnc
 
+    _field_types = None
+    @property
+    def field_types(self):
+        if self._field_types is None:
+            self._field_types = set(t for t, n in self.ds.field_info)
+        return self._field_types
+
     def __dir__(self):
-        return list(set(t for t, n in self.ds.field_info))
+        return list(self.field_types)
+
+    def __iter__(self):
+        for ft in self.field_types:
+            fnc = FieldNameContainer(self.ds, ft)
+            if len(dir(fnc)) == 0:
+                yield self.__getattribute__(ft)
+            else:
+                yield fnc
+
+    def __contains__(self, obj):
+        ob = None
+        if isinstance(obj, FieldNameContainer):
+            ob = obj.field_type
+        elif isinstance(obj, string_types):
+            ob = obj
+
+        return ob in self.field_types
 
 class FieldNameContainer(object):
     def __init__(self, ds, field_type):
@@ -125,6 +149,26 @@
         return [n for t, n in self.ds.field_info
                 if t == self.field_type]
 
+    def __iter__(self):
+        for t, n in self.ds.field_info:
+            if t == self.field_type:
+                yield self.ds.field_info[t, n]
+
+    def __contains__(self, obj):
+        if isinstance(obj, DerivedField):
+            if self.field_type == obj.name[0] and obj.name in self.ds.field_info:
+                # e.g. from a completely different dataset
+                if self.ds.field_info[obj.name] is not obj:
+                    return False
+                return True
+        elif isinstance(obj, tuple):
+            if self.field_type == obj[0] and obj in self.ds.field_info:
+                return True
+        elif isinstance(obj, string_types):
+            if (self.field_type, obj) in self.ds.field_info:
+                return True
+        return False
+
 class IndexProxy(object):
     # This is a simple proxy for Index objects.  It enables backwards
     # compatibility so that operations like .h.sphere, .h.print_stats and
@@ -183,7 +227,6 @@
     particle_types_raw = ("io",)
     geometry = "cartesian"
     coordinates = None
-    max_level = 99
     storage_filename = None
     particle_unions = None
     known_filters = None
@@ -781,8 +824,14 @@
         without having to specify a *center* value.  It assumes the center
         is the midpoint between the left_edge and right_edge.
         """
-        left_edge = np.array(left_edge)
-        right_edge = np.array(right_edge)
+        # we handle units in the region data object
+        # but need to check if left_edge or right_edge is a
+        # list or other non-array iterable before calculating
+        # the center
+        if not isinstance(left_edge, np.ndarray):
+            left_edge = np.array(left_edge)
+        if not isinstance(right_edge, np.ndarray):
+            right_edge = np.array(right_edge)
         c = (left_edge + right_edge)/2.0
         return self.region(c, left_edge, right_edge, **kwargs)
 
@@ -1268,6 +1317,18 @@
         self.field_dependencies.update(deps)
         return grad_fields
 
+    _max_level = None
+    @property
+    def max_level(self):
+        if self._max_level is None:
+            self._max_level = self.index.max_level
+
+        return self._max_level
+
+    @max_level.setter
+    def max_level(self, value):
+        self._max_level = value
+
 def _reconstruct_ds(*args, **kwargs):
     datasets = ParameterFileStore()
     ds = datasets.get_ds_hash(*args)

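The additions to the ds.fields containers above give them iteration and membership tests at both the field-type and field-name level; a minimal sketch of the resulting interface, assuming a loaded dataset ds that defines gas fields:

    for field_type in ds.fields:              # one container per field type
        for field in field_type:              # DerivedField objects
            assert field in field_type
            assert field.name in field_type          # ("gas", "density") works
            assert field.name[1] in field_type       # bare "density" works too
    assert "gas" in ds.fields                 # field-type names are members
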
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/tests/test_clone.py
--- /dev/null
+++ b/yt/data_objects/tests/test_clone.py
@@ -0,0 +1,24 @@
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_array_equal
+
+def test_clone_sphere():
+    # Now we test that we can get different radial velocities based on field
+    # parameters.
+
+    # Get the first sphere
+    ds = fake_random_ds(16, fields = ("density",
+      "velocity_x", "velocity_y", "velocity_z"))
+    sp0 = ds.sphere(ds.domain_center, 0.25)
+
+    assert_equal(list(sp0.keys()), [])
+
+    sp1 = sp0.clone()
+    sp0["density"]
+    assert_equal(list(sp0.keys()), (("gas","density"),))
+    assert_equal(list(sp1.keys()), [])
+
+    sp1["density"]
+
+    assert_array_equal(sp0["density"], sp1["density"])

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -1,3 +1,5 @@
+import numpy as np
+
 from yt.testing import \
     assert_equal, \
     fake_amr_ds, \
@@ -6,6 +8,25 @@
 
 # This will test the "dataset access" method.
 
+def test_box_creation():
+    ds = fake_random_ds(32, length_unit=2)
+    left_edge = ds.arr([0.2, 0.2, 0.2], 'cm')
+    right_edge = ds.arr([0.6, 0.6, 0.6], 'cm')
+    center = (left_edge + right_edge)/2
+
+    boxes = [
+        ds.box(left_edge, right_edge),
+        ds.box(0.5*np.array(left_edge), 0.5*np.array(right_edge)),
+        ds.box((0.5*left_edge).tolist(), (0.5*right_edge).tolist())
+    ]
+
+    region = ds.region(center, left_edge, right_edge)
+
+    for b in boxes:
+        assert_equal(b.left_edge, region.left_edge)
+        assert_equal(b.right_edge, region.right_edge)
+        assert_equal(b.center, region.center)
+
 def test_region_from_d():
     ds = fake_amr_ds(fields=["density"])
     # We'll do a couple here

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -193,6 +193,7 @@
 
     fp_units = {
         'bulk_velocity' : 'cm/s',
+        'bulk_magnetic_field': 'G',
         'center' : 'cm',
         'normal' : '',
         'cp_x_vec': '',
@@ -207,8 +208,9 @@
         if self.field_parameters and param in self.field_parameters:
             return self.field_parameters[param]
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity', 'center', 'normal']:
-            return self.ds.arr(np.random.random(3) * 1e-2, self.fp_units[param])
+        if param in ['center', 'normal'] or param.startswith('bulk'):
+            return self.ds.arr(
+                np.random.random(3) * 1e-2, self.fp_units[param])
         elif param in ['surface_height']:
             return self.ds.quan(0.0, 'code_length')
         elif param in ['axis']:

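With the field-detector change above, any field parameter whose name starts with "bulk" is handed a small random 3-vector during field detection, with bulk_magnetic_field reported in Gauss. In actual use the parameter is set on a data object; a hedged sketch, assuming a loaded dataset ds:

    sp = ds.sphere("max", (10, "kpc"))
    # Reference field for derived fields that subtract a bulk magnetic field.
    sp.set_field_parameter("bulk_magnetic_field",
                           ds.arr([0.0, 0.0, 1.0e-6], "G"))
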
diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -364,6 +364,10 @@
                 if field in self._show_field_errors:
                     raise
                 if type(e) != YTFieldNotFound:
+                    # if we're doing field tests, raise an error
+                    # see yt.fields.tests.test_fields
+                    if hasattr(self.ds, '_field_test_dataset'):
+                        raise
                     mylog.debug("Raises %s during field %s detection.",
                                 str(type(e)), field)
                 self.pop(field)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -145,32 +145,53 @@
     unit_system = registry.ds.unit_system
     elements = _get_all_elements(registry.species_names)
     for element in elements:
-        registry.add_field((ftype, "%s_nuclei_density" % element), sampling_type="cell", 
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           sampling_type="cell",
                            function = _nuclei_density,
                            particle_type = particle_type,
                            units = unit_system["number_density"])
-    if len(elements) == 0:
-        for element in ["H", "He"]:
-            registry.add_field((ftype, "%s_nuclei_density" % element), sampling_type="cell", 
-                               function = _default_nuclei_density,
-                               particle_type = particle_type,
-                               units = unit_system["number_density"])
+
+    for element in ["H", "He"]:
+        if element in elements:
+            continue
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           sampling_type="cell",
+                           function = _default_nuclei_density,
+                           particle_type = particle_type,
+                           units = unit_system["number_density"])
 
 def _default_nuclei_density(field, data):
+    ftype = field.name[0]
     element = field.name[1][:field.name[1].find("_")]
-    return data["gas", "density"] * _primordial_mass_fraction[element] / \
+    return data[ftype, "density"] * _primordial_mass_fraction[element] / \
       ChemicalFormula(element).weight / amu_cgs
         
 def _nuclei_density(field, data):
+    ftype = field.name[0]
     element = field.name[1][:field.name[1].find("_")]
-    field_data = np.zeros_like(data["gas", "%s_number_density" % 
+
+    nuclei_mass_field = "%s_nuclei_mass_density" % element
+    if (ftype, nuclei_mass_field) in data.ds.field_info:
+        return data[(ftype, nuclei_mass_field)] / \
+          ChemicalFormula(element).weight / amu_cgs
+    metal_field = "%s_metallicity" % element
+    if (ftype, metal_field) in data.ds.field_info:
+        return data[ftype, "density"] * data[(ftype, metal_field)] / \
+          ChemicalFormula(element).weight / amu_cgs
+
+    field_data = np.zeros_like(data[ftype, "%s_number_density" %
                                     data.ds.field_info.species_names[0]])
     for species in data.ds.field_info.species_names:
         nucleus = species
         if "_" in species:
             nucleus = species[:species.find("_")]
+        # num is the number of nuclei contributed by this species.
         num = _get_element_multiple(nucleus, element)
-        field_data += num * data["gas", "%s_number_density" % species]
+        # Since this is a loop over all species existing in this dataset,
+        # we will encounter species that contribute nothing, so we skip them.
+        if num == 0:
+            continue
+        field_data += num * data[ftype, "%s_number_density" % species]
     return field_data
 
 def _get_all_elements(species_list):

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/fields/tests/test_field_name_container.py
--- a/yt/fields/tests/test_field_name_container.py
+++ b/yt/fields/tests/test_field_name_container.py
@@ -1,13 +1,28 @@
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    data_dir_load
+from yt import \
+    load
+from yt.testing import \
+    requires_file
+
+def do_field_type(ft):
+    for field_name in dir(ft):
+        f = getattr(ft, field_name)
+        assert ((ft.field_type, field_name) == f.name)
+    for field in ft:
+        f = getattr(ft, field.name[1])
+        assert (f == field)
+        assert (f in ft)
+        assert (f.name in ft)
+        assert (f.name[1] in ft)
+
 
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
-@requires_ds(enzotiny)
-def test_simulated_halo_mass_function():
-    ds = data_dir_load(enzotiny)
+@requires_file(enzotiny)
+def test_field_name_container():
+    ds = load(enzotiny)
     for field_type in dir(ds.fields):
+        assert (field_type in ds.fields)
         ft = getattr(ds.fields, field_type)
-        for field_name in dir(ft):
-            f = getattr(ft, field_name)
-            assert ((field_type, field_name) == f.name)
+        do_field_type(ft)
+    for field_type in ds.fields:
+        assert (field_type in ds.fields)
+        do_field_type(field_type)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -24,27 +24,6 @@
     YTFieldUnitParseError, \
     YTDimensionalityError
 
-base_ds = None
-
-
-def setup():
-    global base_ds
-    # Make this super teeny tiny
-    fields, units = [], []
-
-    for fname, (code_units, aliases, dn) in StreamFieldInfo.known_other_fields:
-        fields.append(("gas", fname))
-        units.append(code_units)
-
-    base_ds = fake_random_ds(4, fields=fields, units=units, particles=20)
-
-    base_ds.index
-    base_ds.cosmological_simulation = 1
-    base_ds.cosmology = Cosmology()
-    from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
-
 def get_params(ds):
     return dict(
         axis = 0,
@@ -52,6 +31,8 @@
             registry = ds.unit_registry),
         bulk_velocity = YTArray((0.0, 0.0, 0.0),
             "cm/s", registry = ds.unit_registry),
+        bulk_magnetic_field = YTArray((0.0, 0.0, 0.0),
+            "G", registry = ds.unit_registry),        
         normal = YTArray((0.0, 0.0, 1.0),
             "", registry = ds.unit_registry),
         cp_x_vec = YTArray((1.0, 0.0, 0.0),
@@ -71,18 +52,79 @@
                 ("gas", "velocity_y"),
                 ("gas", "velocity_z"))
 
-def realistic_ds(fields, particle_fields, nprocs):
-    np.random.seed(int(0x4d3d3d3))
-    global base_ds
-    units = [base_ds._get_field_info(*f).units for f in fields]
-    punits = [base_ds._get_field_info('io', f).units for f in particle_fields]
-    fields = [_strip_ftype(f) for f in fields]
+def _strip_ftype(field):
+    if not isinstance(field, tuple):
+        return field
+    elif field[0] in ("all", "io"):
+        return field
+    return field[1]
 
-    ds = fake_random_ds(16, fields=fields, units=units, nprocs=nprocs,
-                        particle_fields=particle_fields,
-                        particle_field_units=punits,
-                        particles=base_ds.stream_handler.particle_count[0][0])
 
+class TestFieldAccess(object):
+    description = None
+
+    def __init__(self, field_name, ds, nprocs):
+        # Note this should be a field name
+        self.field_name = field_name
+        self.description = "Accessing_%s_%s" % (field_name, nprocs)
+        self.nprocs = nprocs
+        self.ds = ds
+
+    def __call__(self):
+        field = self.ds._get_field_info(*self.field_name)
+        skip_grids = False
+        needs_spatial = False
+        for v in field.validators:
+            if getattr(v, "ghost_zones", 0) > 0:
+                skip_grids = True
+            if hasattr(v, "ghost_zones"):
+                needs_spatial = True
+
+        ds = self.ds
+
+        # This gives unequal sized grids as well as subgrids
+        dd1 = ds.all_data()
+        dd2 = ds.all_data()
+        sp = get_params(ds)
+        dd1.field_parameters.update(sp)
+        dd2.field_parameters.update(sp)
+        with np.errstate(all='ignore'):
+            v1 = dd1[self.field_name]
+            # No more conversion checking
+            assert_equal(v1, dd1[self.field_name])
+            if not needs_spatial:
+                with field.unit_registry(dd2):
+                    res = field._function(field, dd2)
+                    res = dd2.apply_units(res, field.units)
+                assert_array_almost_equal_nulp(v1, res, 4)
+            if not skip_grids:
+                for g in ds.index.grids:
+                    g.field_parameters.update(sp)
+                    v1 = g[self.field_name]
+                    g.clear_data()
+                    g.field_parameters.update(sp)
+                    r1 = field._function(field, g)
+                    if field.particle_type:
+                        assert_equal(v1.shape[0], g.NumberOfParticles)
+                    else:
+                        assert_array_equal(r1.shape, v1.shape)
+                        for ax in 'xyz':
+                            assert_array_equal(g[ax].shape, v1.shape)
+                    with field.unit_registry(g):
+                        res = field._function(field, g)
+                        assert_array_equal(v1.shape, res.shape)
+                        res = g.apply_units(res, field.units)
+                    assert_array_almost_equal_nulp(v1, res, 4)
+
+def get_base_ds(nprocs):
+    fields, units = [], []
+
+    for fname, (code_units, aliases, dn) in StreamFieldInfo.known_other_fields:
+        fields.append(("gas", fname))
+        units.append(code_units)
+
+    ds = fake_random_ds(
+        4, fields=fields, units=units, particles=20, nprocs=nprocs)
     ds.parameters["HydroMethod"] = "streaming"
     ds.parameters["EOSType"] = 1.0
     ds.parameters["EOSSoundSpeed"] = 1.0
@@ -98,108 +140,37 @@
                              omega_matter=ds.omega_matter,
                              omega_lambda=ds.omega_lambda,
                              unit_registry=ds.unit_registry)
+    # ensures field errors are raised during testing
+    # see FieldInfoContainer.check_derived_fields
+    ds._field_test_dataset = True
+    ds.index
     return ds
-
-def _strip_ftype(field):
-    if not isinstance(field, tuple):
-        return field
-    elif field[0] in ("all", "io"):
-        return field
-    return field[1]
-
-
-class TestFieldAccess(object):
-    description = None
-
-    def __init__(self, field_name, nproc):
-        # Note this should be a field name
-        self.field_name = field_name
-        self.description = "Accessing_%s_%s" % (field_name, nproc)
-        self.nproc = nproc
-
-    def __call__(self):
-        global base_ds
-        field = base_ds._get_field_info(*self.field_name)
-        deps = field.get_dependencies(ds = base_ds)
-        requested = deps.requested
-        particle_fields = \
-            ['particle_position_x', 'particle_position_y', 'particle_position_z',
-             'particle_velocity_x', 'particle_velocity_y', 'particle_velocity_z',
-             'particle_mass']
-        fields = list(_base_fields)
-
-        for rf in requested:
-            if rf[0] == 'io' or rf[0] == 'all':
-                if rf not in particle_fields or rf[1] not in particle_fields:
-                    particle_fields.append(rf[1])
-            else:
-                fields.append(rf)
+    
+def test_all_fields():
+    datasets = {}
+        
+    for nprocs in [1, 4, 8]:
+        ds = get_base_ds(nprocs)
+        datasets[nprocs] = ds
 
-        skip_grids = False
-        needs_spatial = False
-        for v in field.validators:
-            f = getattr(v, "fields", None)
-            if f: fields += f
-            if getattr(v, "ghost_zones", 0) > 0:
-                skip_grids = True
-            if hasattr(v, "ghost_zones"):
-                needs_spatial = True
-
-        ds = realistic_ds(fields, particle_fields, self.nproc)
-
-        # This gives unequal sized grids as well as subgrids
-        dd1 = ds.all_data()
-        dd2 = ds.all_data()
-        sp = get_params(ds)
-        dd1.field_parameters.update(sp)
-        dd2.field_parameters.update(sp)
-        v1 = dd1[self.field_name]
-        # No more conversion checking
-        assert_equal(v1, dd1[self.field_name])
-        if not needs_spatial:
-            with field.unit_registry(dd2):
-                res = field._function(field, dd2)
-                res = dd2.apply_units(res, field.units)
-            assert_array_almost_equal_nulp(v1, res, 4)
-        if not skip_grids:
-            for g in ds.index.grids:
-                g.field_parameters.update(sp)
-                v1 = g[self.field_name]
-                g.clear_data()
-                g.field_parameters.update(sp)
-                r1 = field._function(field, g)
-                if field.particle_type:
-                    assert_equal(v1.shape[0], g.NumberOfParticles)
-                else:
-                    assert_array_equal(r1.shape, v1.shape)
-                    for ax in 'xyz':
-                        assert_array_equal(g[ax].shape, v1.shape)
-                with field.unit_registry(g):
-                    res = field._function(field, g)
-                    assert_array_equal(v1.shape, res.shape)
-                    res = g.apply_units(res, field.units)
-                assert_array_almost_equal_nulp(v1, res, 4)
-
-def test_all_fields():
-    global base_ds
-    for field in sorted(base_ds.field_info):
+    for field in sorted(ds.field_info):
         if field[1].find("beta_p") > -1:
             continue
         if field[1].find("vertex") > -1:
             # don't test the vertex fields for now
             continue
-        if field in base_ds.field_list:
+        if field in ds.field_list:
             # Don't know how to test this.  We need some way of having fields
             # that are fallbacks be tested, but we don't have that now.
             continue
 
-        for nproc in [1, 4, 8]:
-            test_all_fields.__name__ = "%s_%s" % (field, nproc)
-            yield TestFieldAccess(field, nproc)
+        for nprocs in [1, 4, 8]:
+            test_all_fields.__name__ = "%s_%s" % (field, nprocs)
+            yield TestFieldAccess(field, datasets[nprocs], nprocs)
 
 def test_add_deposited_particle_field():
     # NOT tested: "std", "mesh_id", "nearest" and "simple_smooth"
-    global base_ds
+    base_ds = get_base_ds(1)
     ad = base_ds.all_data()
 
     # Test "count", "sum" and "cic" method
@@ -226,14 +197,14 @@
     ds = load('GadgetDiskGalaxy/snapshot_200.hdf5')
     fn = ds.add_smoothed_particle_field(('PartType0', 'particle_ones'))
     assert_equal(fn, ('deposit', 'PartType0_smoothed_particle_ones'))
-    ad = ds.all_data()
-    ret = ad[fn]
-    assert_almost_equal(ret.sum(), 3824750.912653606)
+    dd = ds.sphere('center', (500, 'code_length'))
+    ret = dd[fn]
+    assert_almost_equal(ret.sum(), 638.5652315154682)
 
 def test_add_gradient_fields():
-    global base_ds
-    gfields = base_ds.add_gradient_fields(("gas","density"))
-    gfields += base_ds.add_gradient_fields(("index", "ones"))
+    ds = get_base_ds(1)
+    gfields = ds.add_gradient_fields(("gas","density"))
+    gfields += ds.add_gradient_fields(("index", "ones"))
     field_list = [('gas', 'density_gradient_x'),
                   ('gas', 'density_gradient_y'),
                   ('gas', 'density_gradient_z'),
@@ -243,7 +214,7 @@
                   ('index', 'ones_gradient_z'),
                   ('index', 'ones_gradient_magnitude')]
     assert_equal(gfields, field_list)
-    ad = base_ds.all_data()
+    ad = ds.all_data()
     for field in field_list:
         ret = ad[field]
         if field[0] == 'gas':
@@ -346,12 +317,3 @@
     a1 = np.argsort(mi)
     a2 = np.argsort(mi2)
     assert_array_equal(a1, a2)
-
-if __name__ == "__main__":
-    setup()
-    for t in test_all_fields():
-        t()
-    test_add_deposited_particle_field()
-    test_add_field_unit_semantics()
-    test_array_like_field()
-    test_add_field_string()

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -7,7 +7,7 @@
     SelectorObject, AlwaysSelector, OctreeSubsetSelector
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
-    SparseOctreeContainer
+    SparseOctreeContainer, OctObjectPool
 from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
@@ -923,7 +923,7 @@
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
         self.artio_handle = range_handler.artio_handle
         self.level_offset = 1
-        self.domains = NULL
+        self.domains = OctObjectPool()
         self.root_nodes = NULL
 
     @cython.boundscheck(False)
@@ -949,7 +949,7 @@
 
         # We only allow one root oct.
         self.append_domain(oct_count)
-        self.domains[self.num_domains - 1].con_id = sfc
+        self.domains.containers[self.num_domains - 1].con_id = sfc
 
         oct_ind = -1
         ipos = 0
@@ -1009,7 +1009,7 @@
         source_arrays = []
         ipos = -1
         for i in range(self.num_domains):
-            ipos = imax(ipos, self.domains[i].n)
+            ipos = imax(ipos, self.domains.containers[i].n)
         for i in range(nf):
             field_ind[i] = field_indices[i]
             # Note that we subtract one, because we're not using the root mesh.
@@ -1029,13 +1029,13 @@
         #     double-loop to calculate domain_counts
         # The cons should be in order
         cdef np.int64_t sfc_start, sfc_end
-        sfc_start = self.domains[0].con_id
-        sfc_end = self.domains[self.num_domains - 1].con_id
+        sfc_start = self.domains.containers[0].con_id
+        sfc_end = self.domains.containers[self.num_domains - 1].con_id
         status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end)
         check_artio_status(status)
         cdef np.int64_t offset = 0
         for si in range(self.num_domains):
-            sfc = self.domains[si].con_id
+            sfc = self.domains.containers[si].con_id
             status = artio_grid_read_root_cell_begin( handle, sfc,
                     dpos, NULL, &num_oct_levels, num_octs_per_level)
             check_artio_status(status)

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -126,8 +126,8 @@
         if dlevel != 1:
             raise NotImplementedError
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = np.maximum(0, cgi / rf - gi)
-        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi // rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) // rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],

diff -r e74ddc55db8b7b11836c2a01815f3774eeecf503 -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -587,6 +587,10 @@
                      len(potential_outputs))
 
         my_outputs = {}
+        llevel = mylog.level
+        # suppress logging as we load every dataset, unless set to debug
+        if llevel > 10 and llevel < 40:
+            mylog.setLevel(40)
         for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
@@ -609,6 +613,7 @@
                             my_storage.result['redshift'] = ds.current_redshift
                 except YTOutputNotIdentified:
                     mylog.error('Failed to load %s', filename)
+        mylog.setLevel(llevel)
         my_outputs = [my_output for my_output in my_outputs.values() \
                       if my_output is not None]
 

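The logging change above stashes the current level, raises it to 40 (ERROR) while every candidate output is loaded, and restores it afterwards, leaving DEBUG-level users untouched. The same pattern, as a standalone sketch with an explicit restore:

    from yt.utilities.logger import ytLogger as mylog

    llevel = mylog.level
    if 10 < llevel < 40:           # only suppress INFO/WARNING-level loggers
        mylog.setLevel(40)         # ERROR and above only
    try:
        pass                       # load the candidate datasets here
    finally:
        mylog.setLevel(llevel)     # restore the previous level
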
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/88e7e624bf1e/
Changeset:   88e7e624bf1e
Branch:      yt
User:        MatthewTurk
Date:        2016-12-13 22:10:27+00:00
Summary:     Update docstring, add tests
Affected #:  2 files

diff -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 -r 88e7e624bf1ea4c1920797e435ce6bdbb73d1a8e yt/data_objects/tests/test_refinement.py
--- /dev/null
+++ b/yt/data_objects/tests/test_refinement.py
@@ -0,0 +1,50 @@
+from yt.testing import \
+    fake_amr_ds, \
+    assert_array_equal, \
+    assert_equal
+import yt
+import numpy as np
+
+def setup_fake_refby():
+    refine_by=np.array([5, 1, 1])
+    top_grid_dim = [100,  10, 2]
+    n1=100
+    n2=10
+    n3=2
+
+    grid_data = [
+        dict(left_edge = [0.0, 0.0, 0.0],
+             right_edge = [1.0, np.pi, np.pi*2.],
+             level = 0,
+             dimensions = np.array([n1, n2, n3])),
+        dict(left_edge = [0., 0., 0.],
+             right_edge = [0.5, np.pi, np.pi*2.],
+             level = 1,
+             dimensions = refine_by*[n1/2.0, n2, n3]),
+    ]
+
+    for g in grid_data:
+        g["density"] = (np.random.random(g["dimensions"].astype("i8")),
+                        "g/cm**3")
+    bbox = np.array([[0.0, 1.0], [0.0, np.pi], [0.0, np.pi*2]])
+
+    ds = yt.load_amr_grids(grid_data, top_grid_dim,
+                           bbox = bbox, geometry='spherical',
+                           refine_by=refine_by, length_unit='kpc')
+    return ds
+
+def test_refine_by():
+    ds = setup_fake_refby()
+    dd = ds.all_data()
+    # This checks that we always refine_by 1 in dimensions 2 and 3
+    dims = ds.domain_dimensions*ds.refine_by**ds.max_level
+    for i in range(1, 3):
+        # Check the refine_by == 1
+        ncoords = np.unique(dd.icoords[:,i]).size
+        assert_equal(ncoords, dims[i])
+    for g in ds.index.grids:
+        dims = ds.domain_dimensions*ds.refine_by**g.Level
+        # Now we can check converting back to the reference space
+        v = ((g.icoords + 1) / dims.astype("f8")).max(axis=0)
+        v *= ds.domain_width
+        assert_array_equal(v, g.RightEdge.d)

diff -r 2c89dcbc19141ac854400458dcb1b9e13dd0a3f0 -r 88e7e624bf1ea4c1920797e435ce6bdbb73d1a8e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -786,8 +786,12 @@
         be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
         can be done for other coordinates, for instance:
         ("spherical", ("theta", "phi", "r")).
-    refine_by : integer
-        Specifies the refinement ratio between levels.  Defaults to 2.
+    refine_by : integer or list/array of integers.
+        Specifies the refinement ratio between levels.  Defaults to 2.  This
+        can be an array, in which case it specifies for each dimension.  For
+        instance, this can be used to say that some datasets have refinement of
+        1 in one dimension, indicating that they span the full range in that
+        dimension.
 
     Examples
     --------


https://bitbucket.org/yt_analysis/yt/commits/96c94ca09b9f/
Changeset:   96c94ca09b9f
Branch:      yt
User:        MatthewTurk
Date:        2016-12-14 21:49:52+00:00
Summary:     Fixing some of the errors in fields
Affected #:  1 file

diff -r 88e7e624bf1ea4c1920797e435ce6bdbb73d1a8e -r 96c94ca09b9f3f5cb561efaba1c02cec578e6e24 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -668,7 +668,7 @@
         refine_by = self.ds.refine_by
         if not iterable(self.ds.refine_by):
             refine_by = [refine_by, refine_by, refine_by]
-        refine_by = np.ndarray(refine_by, dtype="i8")
+        refine_by = np.array(refine_by, dtype="i8")
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             # NOTE: This usage of "refine_by" is actually *okay*, because it's
@@ -948,7 +948,7 @@
         refine_by = self.ds.refine_by
         if not iterable(self.ds.refine_by):
             refine_by = [refine_by, refine_by, refine_by]
-        refine_by = np.ndarray(refine_by, dtype="i8")
+        refine_by = np.array(refine_by, dtype="i8")
         for level in range(self.level + 1):
             if level < min_level:
                 self._update_level_state(ls)

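This follow-up fixes the earlier hunks' np.ndarray(refine_by, dtype="i8"), which interprets its argument as an array shape and returns uninitialized memory; np.array is the constructor that copies the values. The resulting normalization idiom, as a standalone sketch:

    import numpy as np
    from yt.funcs import iterable

    refine_by = 2                                # scalar or per-dimension sequence
    if not iterable(refine_by):
        refine_by = [refine_by, refine_by, refine_by]
    refine_by = np.array(refine_by, dtype="i8")  # np.ndarray([2, 2, 2]) would have
                                                 # allocated a 2x2x2 block of garbage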

https://bitbucket.org/yt_analysis/yt/commits/038dc4d9bcd9/
Changeset:   038dc4d9bcd9
Branch:      yt
User:        MatthewTurk
Date:        2016-12-15 15:27:25+00:00
Summary:     Fixing flake8
Affected #:  1 file

diff -r 96c94ca09b9f3f5cb561efaba1c02cec578e6e24 -r 038dc4d9bcd9fe8484f9a2b71f5964915ddc5397 yt/data_objects/tests/test_refinement.py
--- a/yt/data_objects/tests/test_refinement.py
+++ b/yt/data_objects/tests/test_refinement.py
@@ -1,5 +1,4 @@
 from yt.testing import \
-    fake_amr_ds, \
     assert_array_equal, \
     assert_equal
 import yt


https://bitbucket.org/yt_analysis/yt/commits/771123590278/
Changeset:   771123590278
Branch:      yt
User:        xarthisius
Date:        2017-01-18 15:58:47+00:00
Summary:     Merged in MatthewTurk/yt (pull request #2418)

Enable refine_by to be an array
Affected #:  6 files

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -666,13 +666,17 @@
                          for field in fields]
         domain_dims = self.ds.domain_dimensions.astype("int64") \
                     * self.ds.relative_refinement(0, self.level)
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             # NOTE: This usage of "refine_by" is actually *okay*, because it's
             # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
-                        domain_dims, self.ds.refine_by)
+                        domain_dims, refine_by)
         for name, v in zip(fields, output_fields):
             fi = self.ds._get_field_info(*name)
             self[name] = self.ds.arr(v, fi.units)
@@ -940,6 +944,12 @@
         if len(fields) == 0: return
         ls = self._initialize_level_state(fields)
         min_level = self._compute_minimum_level()
+        # NOTE: This usage of "refine_by" is actually *okay*, because it's
+        # being used with respect to iref, which is *already* scaled!
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for level in range(self.level + 1):
             if level < min_level:
                 self._update_level_state(ls)
@@ -954,11 +964,9 @@
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
-                # NOTE: This usage of "refine_by" is actually *okay*, because it's
-                # being used with respect to iref, which is *already* scaled!
                 tot -= fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
-                            chunk.ires, domain_dims, self.ds.refine_by)
+                            chunk.ires, domain_dims, refine_by)
             if level == 0 and tot != 0:
                 raise RuntimeError
             self._update_level_state(ls)

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,7 +717,7 @@
             cname = cls.__name__
             if cname.endswith("Base"): cname = cname[:-4]
             self._add_object_class(name, cls)
-        if self.refine_by != 2 and hasattr(self, 'proj') and \
+        if not np.all(self.refine_by == 2) and hasattr(self, 'proj') and \
             hasattr(self, 'overlap_proj'):
             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/data_objects/tests/test_refinement.py
--- /dev/null
+++ b/yt/data_objects/tests/test_refinement.py
@@ -0,0 +1,49 @@
+from yt.testing import \
+    assert_array_equal, \
+    assert_equal
+import yt
+import numpy as np
+
+def setup_fake_refby():
+    refine_by=np.array([5, 1, 1])
+    top_grid_dim = [100,  10, 2]
+    n1=100
+    n2=10
+    n3=2
+
+    grid_data = [
+        dict(left_edge = [0.0, 0.0, 0.0],
+             right_edge = [1.0, np.pi, np.pi*2.],
+             level = 0,
+             dimensions = np.array([n1, n2, n3])),
+        dict(left_edge = [0., 0., 0.],
+             right_edge = [0.5, np.pi, np.pi*2.],
+             level = 1,
+             dimensions = refine_by*[n1/2.0, n2, n3]),
+    ]
+
+    for g in grid_data:
+        g["density"] = (np.random.random(g["dimensions"].astype("i8")),
+                        "g/cm**3")
+    bbox = np.array([[0.0, 1.0], [0.0, np.pi], [0.0, np.pi*2]])
+
+    ds = yt.load_amr_grids(grid_data, top_grid_dim,
+                           bbox = bbox, geometry='spherical',
+                           refine_by=refine_by, length_unit='kpc')
+    return ds
+
+def test_refine_by():
+    ds = setup_fake_refby()
+    dd = ds.all_data()
+    # This checks that we always refine_by 1 in dimensions 2 and 3
+    dims = ds.domain_dimensions*ds.refine_by**ds.max_level
+    for i in range(1, 3):
+        # Check the refine_by == 1
+        ncoords = np.unique(dd.icoords[:,i]).size
+        assert_equal(ncoords, dims[i])
+    for g in ds.index.grids:
+        dims = ds.domain_dimensions*ds.refine_by**g.Level
+        # Now we can check converting back to the reference space
+        v = ((g.icoords + 1) / dims.astype("f8")).max(axis=0)
+        v *= ds.domain_width
+        assert_array_equal(v, g.RightEdge.d)

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -786,8 +786,12 @@
         be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
         can be done for other coordinates, for instance:
         ("spherical", ("theta", "phi", "r")).
-    refine_by : integer
-        Specifies the refinement ratio between levels.  Defaults to 2.
+    refine_by : integer or list/array of integers.
+        Specifies the refinement ratio between levels.  Defaults to 2.  This
+        can be an array, in which case it specifies for each dimension.  For
+        instance, this can be used to say that some datasets have refinement of
+        1 in one dimension, indicating that they span the full range in that
+        dimension.
 
     Examples
     --------

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -813,10 +813,10 @@
                 np.ndarray[np.int64_t, ndim=2] ipos,
                 np.ndarray[np.int64_t, ndim=1] ires,
                 np.ndarray[np.int64_t, ndim=1] level_dims,
-                np.int64_t refine_by = 2
+                np.ndarray[np.int64_t, ndim=1] refine_by
                 ):
     cdef int i, n
-    cdef np.int64_t tot = 0, oi, oj, ok, rf
+    cdef np.int64_t tot = 0, oi, oj, ok, rf[3]
     cdef np.int64_t iind[3]
     cdef np.int64_t oind[3]
     cdef np.int64_t dim[3]
@@ -844,15 +844,16 @@
         ofield = output_fields[n]
         ifield = input_fields[n]
         for i in range(ipos.shape[0]):
-            rf = refine_by**(output_level - ires[i])
+            for k in range(3):
+                rf[k] = refine_by[k]**(output_level - ires[i])
             for wi in range(3):
                 if offsets[0][wi] == 0: continue
                 off = (left_index[0] + level_dims[0]*(wi-1))
-                iind[0] = ipos[i, 0] * rf - off
+                iind[0] = ipos[i, 0] * rf[0] - off
                 # rf here is the "refinement factor", or, the number of zones
                 # that this zone could potentially contribute to our filled
                 # grid.
-                for oi in range(rf):
+                for oi in range(rf[0]):
                     # Now we need to apply our offset
                     oind[0] = oi + iind[0]
                     if oind[0] < 0:
@@ -862,8 +863,8 @@
                     for wj in range(3):
                         if offsets[1][wj] == 0: continue
                         off = (left_index[1] + level_dims[1]*(wj-1))
-                        iind[1] = ipos[i, 1] * rf - off
-                        for oj in range(rf):
+                        iind[1] = ipos[i, 1] * rf[1] - off
+                        for oj in range(rf[1]):
                             oind[1] = oj + iind[1]
                             if oind[1] < 0:
                                 continue
@@ -872,8 +873,8 @@
                             for wk in range(3):
                                 if offsets[2][wk] == 0: continue
                                 off = (left_index[2] + level_dims[2]*(wk-1))
-                                iind[2] = ipos[i, 2] * rf - off
-                                for ok in range(rf):
+                                iind[2] = ipos[i, 2] * rf[2] - off
+                                for ok in range(rf[2]):
                                     oind[2] = ok + iind[2]
                                     if oind[2] < 0:
                                         continue

diff -r fc3d1369fd2821a203b0bae4b94b53915254f468 -r 771123590278f97c40911a6bc318174de90cdd0e yt/utilities/lib/tests/test_fill_region.py
--- a/yt/utilities/lib/tests/test_fill_region.py
+++ b/yt/utilities/lib/tests/test_fill_region.py
@@ -25,7 +25,8 @@
         ires = np.zeros(NDIM*NDIM*NDIM, "int64")
         ddims = np.array([NDIM, NDIM, NDIM], dtype="int64") * rf
         fill_region(input_fields, output_fields, level,
-                    left_index, ipos, ires, ddims, 2)
+                    left_index, ipos, ires, ddims,
+                    np.array([2, 2, 2], dtype="i8"))
         for r in range(level + 1):
             for o, i in zip(output_fields, v):
                 assert_equal( o[r::rf,r::rf,r::rf], i)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

