[yt-svn] commit/yt: 52 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Sep 7 11:06:32 PDT 2016


52 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/430491110c2b/
Changeset:   430491110c2b
Branch:      yt
User:        brittonsmith
Date:        2015-06-23 16:04:11+00:00
Summary:     Adding ability to get non-periodic segments from subvolumes.  This is quite a hack.
Affected #:  1 file

diff -r 1c339ea7619997cba6842e973cf5f0e13b2f30ee -r 430491110c2b7ba0f0e33ec6a9a67e96f5869708 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -177,6 +177,7 @@
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
+                                      left_edge=None, right_edge=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -236,21 +237,28 @@
                         (box_fraction_used +
                          self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                     # Random start point
-                    self.light_ray_solution[q]['start'] = np.random.random(3)
-                    theta = np.pi * np.random.random()
-                    phi = 2 * np.pi * np.random.random()
-                    box_fraction_used = 0.0
+                    if left_edge is not None and right_edge is not None:
+                        self.light_ray_solution[q]['start'], \
+                          self.light_ray_solution[q]['end'] = \
+                          non_periodic_ray(left_edge, right_edge,
+                            self.light_ray_solution[q]['traversal_box_fraction'])
+                    else:
+                        self.light_ray_solution[q]['start'] = np.random.random(3)
+                        theta = np.pi * np.random.random()
+                        phi = 2 * np.pi * np.random.random()
+                        box_fraction_used = 0.0
                 else:
                     # Use end point of previous segment and same theta and phi.
                     self.light_ray_solution[q]['start'] = \
                       self.light_ray_solution[q-1]['end'][:]
 
-                self.light_ray_solution[q]['end'] = \
-                  self.light_ray_solution[q]['start'] + \
-                    self.light_ray_solution[q]['traversal_box_fraction'] * \
-                    np.array([np.cos(phi) * np.sin(theta),
-                              np.sin(phi) * np.sin(theta),
-                              np.cos(theta)])
+                if "end" not in self.light_ray_solution[q]:
+                    self.light_ray_solution[q]['end'] = \
+                      self.light_ray_solution[q]['start'] + \
+                        self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        np.array([np.cos(phi) * np.sin(theta),
+                                  np.sin(phi) * np.sin(theta),
+                                  np.cos(theta)])
                 box_fraction_used += \
                   self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -262,6 +270,7 @@
                             'near_redshift':self.near_redshift})
 
     def make_light_ray(self, seed=None,
+                       left_edge=None, right_edge=None, 
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,
@@ -382,6 +391,7 @@
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
+                                           left_edge=left_edge, right_edge=right_edge,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
@@ -745,3 +755,22 @@
         t += dt
 
     return segments
+
+def non_periodic_ray(left_edge, right_edge, ray_length, max_iter=500):
+    i = 0
+    while True:
+        start = np.random.random(3) * \
+          (right_edge - left_edge) + left_edge
+        theta = np.pi * np.random.random()
+        phi = 2 * np.pi * np.random.random()
+        end = start + ray_length * \
+          np.array([np.cos(phi) * np.sin(theta),
+                    np.sin(phi) * np.sin(theta),
+                    np.cos(theta)])
+        i += 1
+        if (end >= left_edge).all() and (end <= right_edge).all():
+            #mylog.info("Found ray after %d attempts." % i)
+            return start, end
+        if i > max_iter:
+            mylog.info("Exceed iteration limit.")
+            return None, None


https://bitbucket.org/yt_analysis/yt/commits/258f18da597e/
Changeset:   258f18da597e
Branch:      yt
User:        brittonsmith
Date:        2015-07-25 05:59:51+00:00
Summary:     Adding a min level check.
Affected #:  1 file

diff -r 430491110c2b7ba0f0e33ec6a9a67e96f5869708 -r 258f18da597e656c4980140e9ad4b4f08e1612bb yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -238,10 +238,12 @@
                          self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
                     # Random start point
                     if left_edge is not None and right_edge is not None:
+                        ds = load(self.light_ray_solution[q]["filename"])
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
-                          non_periodic_ray(left_edge, right_edge,
+                          non_periodic_ray(ds, left_edge, right_edge,
                             self.light_ray_solution[q]['traversal_box_fraction'])
+                        del ds
                     else:
                         self.light_ray_solution[q]['start'] = np.random.random(3)
                         theta = np.pi * np.random.random()
@@ -756,7 +758,7 @@
 
     return segments
 
-def non_periodic_ray(left_edge, right_edge, ray_length, max_iter=500):
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=2):
     i = 0
     while True:
         start = np.random.random(3) * \
@@ -768,9 +770,13 @@
                     np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
         i += 1
-        if (end >= left_edge).all() and (end <= right_edge).all():
-            #mylog.info("Found ray after %d attempts." % i)
-            return start, end
+        test_ray = ds.ray(start, end)
+        if (end >= left_edge).all() and (end <= right_edge).all() and \
+                (test_ray["grid_level"] >= min_level).all():
+            mylog.info("Found ray after %d attempts." % i)
+            del test_ray
+            return start, end.d
+        del test_ray
         if i > max_iter:
             mylog.info("Exceed iteration limit.")
             return None, None


https://bitbucket.org/yt_analysis/yt/commits/fbbe122409ab/
Changeset:   fbbe122409ab
Branch:      yt
User:        brittonsmith
Date:        2015-10-14 16:26:22+00:00
Summary:     Using an actual RandomState instance so non-periodic rays are reproducible.
Affected #:  1 file

diff -r 258f18da597e656c4980140e9ad4b4f08e1612bb -r fbbe122409abd9ce438f24a7a79730dc1898fcbc yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -183,7 +183,7 @@
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        np.random.seed(seed)
+        my_random = np.random.RandomState(seed)
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
@@ -242,12 +242,13 @@
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
                           non_periodic_ray(ds, left_edge, right_edge,
-                            self.light_ray_solution[q]['traversal_box_fraction'])
+                            self.light_ray_solution[q]['traversal_box_fraction'],
+                                           my_random=my_random)
                         del ds
                     else:
-                        self.light_ray_solution[q]['start'] = np.random.random(3)
-                        theta = np.pi * np.random.random()
-                        phi = 2 * np.pi * np.random.random()
+                        self.light_ray_solution[q]['start'] = my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
                         box_fraction_used = 0.0
                 else:
                     # Use end point of previous segment and same theta and phi.
@@ -758,13 +759,16 @@
 
     return segments
 
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=2):
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=2,
+                     my_random=None):
+    if my_random is None:
+        my_random = np.random.RandomState()
     i = 0
     while True:
-        start = np.random.random(3) * \
+        start = my_random.random_sample(3) * \
           (right_edge - left_edge) + left_edge
-        theta = np.pi * np.random.random()
-        phi = 2 * np.pi * np.random.random()
+        theta = np.pi * my_random.random_sample()
+        phi = 2 * np.pi * my_random.random_sample()
         end = start + ray_length * \
           np.array([np.cos(phi) * np.sin(theta),
                     np.sin(phi) * np.sin(theta),


https://bitbucket.org/yt_analysis/yt/commits/21445a666405/
Changeset:   21445a666405
Branch:      yt
User:        brittonsmith
Date:        2016-07-15 13:35:02+00:00
Summary:     Adding min_level keyword to make_light_ray.
Affected #:  1 file

diff -r fbbe122409abd9ce438f24a7a79730dc1898fcbc -r 21445a66640527a2b86ba6f145e843cab21c61ba yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -177,7 +177,7 @@
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
-                                      left_edge=None, right_edge=None,
+                                      left_edge=None, right_edge=None, min_level=0,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -243,7 +243,7 @@
                           self.light_ray_solution[q]['end'] = \
                           non_periodic_ray(ds, left_edge, right_edge,
                             self.light_ray_solution[q]['traversal_box_fraction'],
-                                           my_random=my_random)
+                                           my_random=my_random, min_level=min_level)
                         del ds
                     else:
                         self.light_ray_solution[q]['start'] = my_random.random_sample(3)
@@ -395,6 +395,7 @@
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
                                            left_edge=left_edge, right_edge=right_edge,
+                                           min_level=min_level,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
@@ -759,7 +760,7 @@
 
     return segments
 
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=2,
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=0,
                      my_random=None):
     if my_random is None:
         my_random = np.random.RandomState()


https://bitbucket.org/yt_analysis/yt/commits/2b7062ab4930/
Changeset:   2b7062ab4930
Branch:      yt
User:        brittonsmith
Date:        2015-11-02 12:24:10+00:00
Summary:     Adding ability to specify field parameters to light ray.
Affected #:  1 file

diff -r 21445a66640527a2b86ba6f145e843cab21c61ba -r 2b7062ab493092bc77fdd9f15a954733ec521820 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -278,8 +278,8 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=None, use_peculiar_velocity=True, 
-                       redshift=None, njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True,
+                       redshift=None, field_parameters=None, njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
@@ -401,6 +401,9 @@
                                            trajectory=trajectory,
                                            filename=solution_filename)
 
+        if field_parameters is None:
+            field_parameters = {}
+
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
@@ -487,6 +490,8 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = ds.ray(sub_segment[0], sub_segment[1])
+                for key, val in field_parameters.items():
+                    sub_ray.set_field_parameter(key, val)
                 asort = np.argsort(sub_ray["t"])
                 sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,


https://bitbucket.org/yt_analysis/yt/commits/40a896fad1f4/
Changeset:   40a896fad1f4
Branch:      yt
User:        brittonsmith
Date:        2016-07-15 14:03:29+00:00
Summary:     Allow min_level to be None to bypass level check.
Affected #:  1 file

diff -r 2b7062ab493092bc77fdd9f15a954733ec521820 -r 40a896fad1f4b1cdfec3c9681a757ac97a8303c4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -177,7 +177,7 @@
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
-                                      left_edge=None, right_edge=None, min_level=0,
+                                      left_edge=None, right_edge=None, min_level=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -765,8 +765,8 @@
 
     return segments
 
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=0,
-                     my_random=None):
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500,
+                     min_level=None, my_random=None):
     if my_random is None:
         my_random = np.random.RandomState()
     i = 0
@@ -782,7 +782,7 @@
         i += 1
         test_ray = ds.ray(start, end)
         if (end >= left_edge).all() and (end <= right_edge).all() and \
-                (test_ray["grid_level"] >= min_level).all():
+          (min_level is None or (test_ray["grid_level"] >= min_level).all()):
             mylog.info("Found ray after %d attempts." % i)
             del test_ray
             return start, end.d


https://bitbucket.org/yt_analysis/yt/commits/7366235b10be/
Changeset:   7366235b10be
Branch:      yt
User:        stonnes
Date:        2016-07-17 16:54:13+00:00
Summary:     updating Britton's lightray for highres region
Affected #:  1 file

diff -r 2b7062ab493092bc77fdd9f15a954733ec521820 -r 7366235b10beb3711f1f5143a691d7b6ab4fd519 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -273,7 +273,7 @@
                             'near_redshift':self.near_redshift})
 
     def make_light_ray(self, seed=None,
-                       left_edge=None, right_edge=None, 
+                       left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,


https://bitbucket.org/yt_analysis/yt/commits/9e19dbef9aec/
Changeset:   9e19dbef9aec
Branch:      yt
User:        stonnes
Date:        2016-07-17 18:12:47+00:00
Summary:     edited light_ray line 650-1 to get rid of NoneType error
Affected #:  1 file

diff -r 7366235b10beb3711f1f5143a691d7b6ab4fd519 -r 9e19dbef9aec121248c7473f33af021357307e84 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -647,8 +647,8 @@
         for q, my_segment in enumerate(self.light_ray_solution):
             f.write("%04d    %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \
                     (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
-                     my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
-                     my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
+                     my_segment['start'],
+                     my_segment['end'],
                      my_segment['filename']))
         f.close()
 


https://bitbucket.org/yt_analysis/yt/commits/44350d186863/
Changeset:   44350d186863
Branch:      yt
User:        brittonsmith
Date:        2016-07-18 13:56:16+00:00
Summary:     Merging lost head.
Affected #:  1 file

diff -r 7366235b10beb3711f1f5143a691d7b6ab4fd519 -r 44350d186863c9199c8baf921283389f8727f6b4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -177,7 +177,7 @@
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
-                                      left_edge=None, right_edge=None, min_level=0,
+                                      left_edge=None, right_edge=None, min_level=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -765,8 +765,8 @@
 
     return segments
 
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500, min_level=0,
-                     my_random=None):
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500,
+                     min_level=None, my_random=None):
     if my_random is None:
         my_random = np.random.RandomState()
     i = 0
@@ -782,7 +782,7 @@
         i += 1
         test_ray = ds.ray(start, end)
         if (end >= left_edge).all() and (end <= right_edge).all() and \
-                (test_ray["grid_level"] >= min_level).all():
+          (min_level is None or (test_ray["grid_level"] >= min_level).all()):
             mylog.info("Found ray after %d attempts." % i)
             del test_ray
             return start, end.d


https://bitbucket.org/yt_analysis/yt/commits/4afc4bbf1ffe/
Changeset:   4afc4bbf1ffe
Branch:      yt
User:        brittonsmith
Date:        2016-07-18 14:06:34+00:00
Summary:     Add error checking for ray length longer than max length and make min_level more robust.
Affected #:  1 file

diff -r 44350d186863c9199c8baf921283389f8727f6b4 -r 4afc4bbf1ffeecd1c19c109526d0728a5c12cfe9 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -767,6 +767,14 @@
 
 def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500,
                      min_level=None, my_random=None):
+
+    max_length = vector_length(left_edge, right_edge)
+    if ray_length > max_length:
+        raise RuntimeError(
+            ("The maximum segment length in the region %s to %s is %s, " +
+             "but the ray length requested is %s.  Decrease ray length.") %
+             (left_edge, right_edge, max_length, ray_length))
+
     if my_random is None:
         my_random = np.random.RandomState()
     i = 0
@@ -780,9 +788,10 @@
                     np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
         i += 1
-        test_ray = ds.ray(start, end)
+        test_ray = ds.ray(start, end.d)
         if (end >= left_edge).all() and (end <= right_edge).all() and \
-          (min_level is None or (test_ray["grid_level"] >= min_level).all()):
+          (min_level is None or min_level <= 0 or
+           (test_ray["grid_level"] >= min_level).all()):
             mylog.info("Found ray after %d attempts." % i)
             del test_ray
             return start, end.d


https://bitbucket.org/yt_analysis/yt/commits/373815b03a7b/
Changeset:   373815b03a7b
Branch:      yt
User:        brittonsmith
Date:        2016-07-18 14:07:35+00:00
Summary:     No-op merging other head.
Affected #:  1 file



https://bitbucket.org/yt_analysis/yt/commits/351dcd68bde2/
Changeset:   351dcd68bde2
Branch:      yt
User:        brittonsmith
Date:        2016-07-19 12:13:55+00:00
Summary:     Adding more error checking.
Affected #:  1 file

diff -r 373815b03a7b9fb840ff41dcea10c12796478b03 -r 351dcd68bde20b60ca55d18e8abc432681857987 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -123,6 +123,12 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
+        if near_redshift is not None and
+            far_redshift is not None and
+            near_redshift >= far_redshift:
+            raise RuntimeError(
+                "near_redshift must be less than far_redshift.")
+
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.use_minimum_datasets = use_minimum_datasets
@@ -797,5 +803,6 @@
             return start, end.d
         del test_ray
         if i > max_iter:
-            mylog.info("Exceed iteration limit.")
-            return None, None
+            raies RuntimeError(
+                ("Failed to create segment in %d attempts.  " +
+                 "Decreasing ray length is recommended") % i)


https://bitbucket.org/yt_analysis/yt/commits/5960b69a5abe/
Changeset:   5960b69a5abe
Branch:      yt
User:        stonnes
Date:        2016-07-19 22:39:34+00:00
Summary:     added high_res_box_size_fraction to cosmology_splice and light_ray
Affected #:  2 files

diff -r 351dcd68bde20b60ca55d18e8abc432681857987 -r 5960b69a5abed96ccd097733f99444144a96c0da yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -41,7 +41,7 @@
 
     def create_cosmology_splice(self, near_redshift, far_redshift,
                                 minimal=True, max_box_fraction=1.0,
-                                deltaz_min=0.0,
+                                deltaz_min=0.0, high_res_box_size_fraction=1.0,
                                 time_data=True, redshift_data=True):
         r"""Create list of datasets capable of spanning a redshift
         interval.
@@ -69,6 +69,11 @@
             ray segment can be in order to span the redshift interval from
             one dataset to another.
             Default: 1.0 (the size of the box)
+        high_res_box_size_fraction : float
+            In terms of the size of the domain, the size of the region that
+            will be used to calculate the redshift interval from one dataset
+            to another.  Must be <= 1.0.
+            Default:  1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
             in the returned
@@ -100,7 +105,7 @@
         else:
             mylog.error('Both time_data and redshift_data are False.')
             return
-
+        
         # Link datasets in list with pointers.
         # This is used for connecting datasets together.
         for i, output in enumerate(self.splice_outputs):
@@ -120,6 +125,8 @@
         # Calculate minimum delta z for each data dump.
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
+        self.high_res_box_size_fraction = high_res_box_size_fraction
+        
         cosmology_splice = []
  
         if near_redshift == far_redshift:
@@ -264,7 +271,7 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size*self.high_res_box_size_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -285,7 +292,7 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        target_distance = self.simulation.box_size
+        target_distance = self.simulation.box_size #* self.high_res_box_size_fraction
 
         for output in self.splice_outputs:
             z = output['redshift']

diff -r 351dcd68bde20b60ca55d18e8abc432681857987 -r 5960b69a5abed96ccd097733f99444144a96c0da yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -120,12 +120,11 @@
                  near_redshift=None, far_redshift=None,
                  use_minimum_datasets=True, max_box_fraction=1.0,
                  deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
-                 time_data=True, redshift_data=True,
+                 high_res_box_size_fraction=1.0, time_data=True, 
+                 redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
-        if near_redshift is not None and
-            far_redshift is not None and
-            near_redshift >= far_redshift:
+        if near_redshift is not None and far_redshift is not None and near_redshift >= far_redshift:
             raise RuntimeError(
                 "near_redshift must be less than far_redshift.")
 
@@ -134,6 +133,7 @@
         self.use_minimum_datasets = use_minimum_datasets
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
+        self.high_res_box_size_fraction = high_res_box_size_fraction
         self.parameter_filename = parameter_filename
         if load_kwargs is None:
             self.load_kwargs = {}
@@ -178,12 +178,13 @@
               self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                            minimal=self.use_minimum_datasets,
                                            max_box_fraction=max_box_fraction,
+                                           high_res_box_size_fraction=self.high_res_box_size_fraction,
                                            deltaz_min=self.deltaz_min,
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
-                                      left_edge=None, right_edge=None, min_level=None,
+                                      left_edge=None, right_edge=None, min_level=None, 
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -231,7 +232,7 @@
                 self.light_ray_solution[q]['traversal_box_fraction'] = \
                     self.cosmology.comoving_radial_distance(z_next, \
                         self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
-                        self.simulation.box_size
+                        self.simulation.box_size  
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
@@ -771,7 +772,7 @@
 
     return segments
 
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=500,
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
                      min_level=None, my_random=None):
 
     max_length = vector_length(left_edge, right_edge)
@@ -803,6 +804,6 @@
             return start, end.d
         del test_ray
         if i > max_iter:
-            raies RuntimeError(
+            raise RuntimeError(
                 ("Failed to create segment in %d attempts.  " +
                  "Decreasing ray length is recommended") % i)


https://bitbucket.org/yt_analysis/yt/commits/45a74bd07a34/
Changeset:   45a74bd07a34
Branch:      yt
User:        brittonsmith
Date:        2016-07-24 10:55:35+00:00
Summary:     Allowing LightRay to make periodic rays with a left_edge and right_edge smaller than the whole domain.
Affected #:  2 files

diff -r 5960b69a5abed96ccd097733f99444144a96c0da -r 45a74bd07a34a9f3f886b4e436e66e440fa38658 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -184,7 +184,8 @@
                                            redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
-                                      left_edge=None, right_edge=None, min_level=None, 
+                                      left_edge=None, right_edge=None,
+                                      min_level=None, periodic=True,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -243,8 +244,13 @@
                          self.minimum_coherent_box_fraction) or \
                         (box_fraction_used +
                          self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                    # Random start point
-                    if left_edge is not None and right_edge is not None:
+                    if periodic:
+                        self.light_ray_solution[q]['start'] = left_edge + \
+                          (right_edge - left_edge) * my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
+                        box_fraction_used = 0.0
+                    else:
                         ds = load(self.light_ray_solution[q]["filename"])
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
@@ -252,11 +258,6 @@
                             self.light_ray_solution[q]['traversal_box_fraction'],
                                            my_random=my_random, min_level=min_level)
                         del ds
-                    else:
-                        self.light_ray_solution[q]['start'] = my_random.random_sample(3)
-                        theta = np.pi * my_random.random_sample()
-                        phi = 2 * np.pi * my_random.random_sample()
-                        box_fraction_used = 0.0
                 else:
                     # Use end point of previous segment and same theta and phi.
                     self.light_ray_solution[q]['start'] = \
@@ -266,6 +267,7 @@
                     self.light_ray_solution[q]['end'] = \
                       self.light_ray_solution[q]['start'] + \
                         self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        self.simulation.box_size * \
                         np.array([np.cos(phi) * np.sin(theta),
                                   np.sin(phi) * np.sin(theta),
                                   np.cos(theta)])
@@ -279,7 +281,7 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None,
+    def make_light_ray(self, seed=None, periodic=True,
                        left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
@@ -383,6 +385,18 @@
 
         """
 
+        if left_edge is None:
+            left_edge = self.simulation.domain_left_edge
+        elif not hasattr(left_edge, 'units'):
+            left_edge = self.simulation.arr(left_edge, 'code_length')
+        left_edge.convert_to_units('unitary')
+
+        if right_edge is None:
+            right_edge = self.simulation.domain_right_edge
+        elif not hasattr(right_edge, 'units'):
+            right_edge = self.simulation.arr(right_edge, 'code_length')
+        right_edge.convert_to_units('unitary')
+
         if start_position is not None and hasattr(start_position, 'units'):
             start_position = start_position.to('unitary')
         elif start_position is not None :
@@ -401,8 +415,9 @@
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
-                                           left_edge=left_edge, right_edge=right_edge,
-                                           min_level=min_level,
+                                           left_edge=left_edge,
+                                           right_edge=right_edge,
+                                           min_level=min_level, periodic=periodic,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
@@ -450,11 +465,6 @@
             if start_position is not None:
                 my_segment["start"] = ds.arr(my_segment["start"], "unitary")
                 my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-            else:
-                my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                  ds.domain_left_edge
-                my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                  ds.domain_left_edge
 
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
@@ -483,8 +493,8 @@
 
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=ds.domain_left_edge,
-                                        right=ds.domain_right_edge)
+                                        left=left_edge,
+                                        right=right_edge)
 
             # Prepare data structure for subsegment.
             sub_data = {}

diff -r 5960b69a5abed96ccd097733f99444144a96c0da -r 45a74bd07a34a9f3f886b4e436e66e440fa38658 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -110,6 +110,8 @@
         self.domain_right_edge = self.domain_right_edge * self.length_unit
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_length", self.length_unit)
+        self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                               self.length_unit.units.dimensions)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,


https://bitbucket.org/yt_analysis/yt/commits/60906cc56533/
Changeset:   60906cc56533
Branch:      yt
User:        brittonsmith
Date:        2016-07-31 13:10:54+00:00
Summary:     Fixing minimum coherent box fraction option to allow compound ray with a single trajectory.
Affected #:  1 file

diff -r 45a74bd07a34a9f3f886b4e436e66e440fa38658 -r 60906cc5653352e282ec9cf900e4e6e0fcc77e0e yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -175,13 +175,14 @@
             CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
-              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
-                                           minimal=self.use_minimum_datasets,
-                                           max_box_fraction=max_box_fraction,
-                                           high_res_box_size_fraction=self.high_res_box_size_fraction,
-                                           deltaz_min=self.deltaz_min,
-                                           time_data=time_data,
-                                           redshift_data=redshift_data)
+              self.create_cosmology_splice(
+                  self.near_redshift, self.far_redshift,
+                  minimal=self.use_minimum_datasets,
+                  max_box_fraction=max_box_fraction,
+                  high_res_box_size_fraction=self.high_res_box_size_fraction,
+                  deltaz_min=self.deltaz_min,
+                  time_data=time_data,
+                  redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
                                       left_edge=None, right_edge=None,
@@ -237,13 +238,9 @@
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
-                # enough of the box has been used,
-                # or if box_fraction_used will be greater than 1 after this slice.
-                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                        (box_fraction_used >
-                         self.minimum_coherent_box_fraction) or \
-                        (box_fraction_used +
-                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                # enough of the box has been used.
+                if (q == 0) or (box_fraction_used >=
+                                self.minimum_coherent_box_fraction):
                     if periodic:
                         self.light_ray_solution[q]['start'] = left_edge + \
                           (right_edge - left_edge) * my_random.random_sample(3)
@@ -259,9 +256,11 @@
                                            my_random=my_random, min_level=min_level)
                         del ds
                 else:
-                    # Use end point of previous segment and same theta and phi.
+                    # Use end point of previous segment, adjusted for periodicity,
+                    # and the same trajectory.
                     self.light_ray_solution[q]['start'] = \
-                      self.light_ray_solution[q-1]['end'][:]
+                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
+                                      left=left_edge, right=right_edge)
 
                 if "end" not in self.light_ray_solution[q]:
                     self.light_ray_solution[q]['end'] = \
@@ -697,6 +696,22 @@
 
     return np.sqrt(np.power((end - start), 2).sum())
 
+def periodic_adjust(p, left=None, right=None):
+    """
+    Return the point p adjusted for periodic boundaries.
+
+    """
+    if isinstance(p, YTArray):
+        p.convert_to_units("unitary")
+    if left is None:
+        left = np.zeros_like(p)
+    if right is None:
+        right = np.ones_like(p)
+
+    w = right - left
+    p -= left
+    return np.mod(p, w)
+
 def periodic_distance(coord1, coord2):
     """
     periodic_distance(coord1, coord2)


https://bitbucket.org/yt_analysis/yt/commits/318b36c6efd3/
Changeset:   318b36c6efd3
Branch:      yt
User:        brittonsmith
Date:        2016-07-31 13:28:16+00:00
Summary:     Adding to docstrings.
Affected #:  1 file

diff -r 60906cc5653352e282ec9cf900e4e6e0fcc77e0e -r 318b36c6efd3b80d8fdb965101e23a98d89a8c56 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -86,14 +86,18 @@
         datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
-        Used with use_minimum_datasets set to False, this parameter
-        specifies the fraction of the total box size to be traversed
-        before rerandomizing the projection axis and center.  This
-        was invented to allow light rays with thin slices to sample
-        coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.  
-        Do not use for simple rays.
-        Default: 0.0.
+        Use to specify the minimum length of a ray, in terms of the
+        size of the domain, before the trajectory is re-randomized.
+        Set to 0 to have ray trajectory randomized for every dataset.
+        Set to np.inf (infinity) to use a single trajectory for the
+        entire ray.
+        Default: 0.
+    high_res_box_size_fraction : optional, float
+        For use with zoom-in simulations, use to specify the size of the
+        high resolution region of the simulation.  If set, the light ray
+        solution will be calculated such that rays only make use of the
+        high resolution region.
+        Default: 1.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -124,7 +128,8 @@
                  redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
-        if near_redshift is not None and far_redshift is not None and near_redshift >= far_redshift:
+        if near_redshift is not None and far_redshift is not None and \
+          near_redshift >= far_redshift:
             raise RuntimeError(
                 "near_redshift must be less than far_redshift.")
 
@@ -289,7 +294,9 @@
                        get_los_velocity=None, use_peculiar_velocity=True,
                        redshift=None, field_parameters=None, njobs=-1):
         """
-        make_light_ray(seed=None, start_position=None, end_position=None,
+        make_light_ray(seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
+                       start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        use_peculiar_velocity=True, redshift=None,
@@ -305,6 +312,27 @@
         seed : optional, int
             Seed for the random number generator.
             Default: None.
+        periodic : optional, bool
+            If True, ray trajectories will make use of periodic
+            boundaries.  If False, ray trajectories will not be
+            periodic.
+            Default : True.
+        left_edge : optional, iterable of floats of YTArray
+            The left corner of the region in which rays are to be
+            generated.  If None, the left edge will be that of the
+            domain.
+            Default: None.
+        right_edge : optional, iterable of floats of YTArray
+            The right corner of the region in which rays are to be
+            generated.  If None, the right edge will be that of the
+            domain.
+            Default: None.
+        min_left : optional, int
+            The minimum refinement level of the spatial region in which
+            the ray passes.  This can be used with zoom-in simulations
+            where the high resolution region does not keep a constant
+            geometry.
+            Default: None.
         start_position : optional, iterable of floats or YTArray.
             Used only if creating a light ray from a single dataset.
             The coordinates of the starting position of the ray.
@@ -410,7 +438,8 @@
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
-            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
+            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
+                       "Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,


https://bitbucket.org/yt_analysis/yt/commits/b5f4be4c6a57/
Changeset:   b5f4be4c6a57
Branch:      yt
User:        brittonsmith
Date:        2016-07-31 13:35:50+00:00
Summary:     Get domain edges from correct entity.
Affected #:  1 file

diff -r 318b36c6efd3b80d8fdb965101e23a98d89a8c56 -r b5f4be4c6a57bb7b5e0701708548658084c834f6 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -411,17 +411,21 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if self.simulation_type is None:
+            domain = self.ds
+        else:
+            domain = self.simulation
 
         if left_edge is None:
-            left_edge = self.simulation.domain_left_edge
+            left_edge = domain.domain_left_edge
         elif not hasattr(left_edge, 'units'):
-            left_edge = self.simulation.arr(left_edge, 'code_length')
+            left_edge = domain.arr(left_edge, 'code_length')
         left_edge.convert_to_units('unitary')
 
         if right_edge is None:
-            right_edge = self.simulation.domain_right_edge
+            right_edge = domain.domain_right_edge
         elif not hasattr(right_edge, 'units'):
-            right_edge = self.simulation.arr(right_edge, 'code_length')
+            right_edge = domain.arr(right_edge, 'code_length')
         right_edge.convert_to_units('unitary')
 
         if start_position is not None and hasattr(start_position, 'units'):


https://bitbucket.org/yt_analysis/yt/commits/6603b80f1bcb/
Changeset:   6603b80f1bcb
Branch:      yt
User:        brittonsmith
Date:        2016-07-31 15:52:37+00:00
Summary:     Merging.
Affected #:  3 files

diff -r bdc1201a3c1b9c07a269452d9b4dbeea8c4f0965 -r 6603b80f1bcbf498374b6c9c6388ea3447294408 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -41,7 +41,7 @@
 
     def create_cosmology_splice(self, near_redshift, far_redshift,
                                 minimal=True, max_box_fraction=1.0,
-                                deltaz_min=0.0,
+                                deltaz_min=0.0, high_res_box_size_fraction=1.0,
                                 time_data=True, redshift_data=True):
         r"""Create list of datasets capable of spanning a redshift
         interval.
@@ -69,6 +69,11 @@
             ray segment can be in order to span the redshift interval from
             one dataset to another.
             Default: 1.0 (the size of the box)
+        high_res_box_size_fraction : float
+            In terms of the size of the domain, the size of the region that
+            will be used to calculate the redshift interval from one dataset
+            to another.  Must be <= 1.0.
+            Default:  1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
             in the returned
@@ -100,7 +105,7 @@
         else:
             mylog.error('Both time_data and redshift_data are False.')
             return
-
+        
         # Link datasets in list with pointers.
         # This is used for connecting datasets together.
         for i, output in enumerate(self.splice_outputs):
@@ -120,6 +125,8 @@
         # Calculate minimum delta z for each data dump.
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
+        self.high_res_box_size_fraction = high_res_box_size_fraction
+        
         cosmology_splice = []
  
         if near_redshift == far_redshift:
@@ -264,7 +271,7 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size*self.high_res_box_size_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -285,7 +292,7 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        target_distance = self.simulation.box_size
+        target_distance = self.simulation.box_size #* self.high_res_box_size_fraction
 
         for output in self.splice_outputs:
             z = output['redshift']

diff -r bdc1201a3c1b9c07a269452d9b4dbeea8c4f0965 -r 6603b80f1bcbf498374b6c9c6388ea3447294408 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -88,14 +88,18 @@
         datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
-        Used with use_minimum_datasets set to False, this parameter
-        specifies the fraction of the total box size to be traversed
-        before rerandomizing the projection axis and center.  This
-        was invented to allow light rays with thin slices to sample
-        coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.  
-        Do not use for simple rays.
-        Default: 0.0.
+        Use to specify the minimum length of a ray, in terms of the
+        size of the domain, before the trajectory is re-randomized.
+        Set to 0 to have ray trajectory randomized for every dataset.
+        Set to np.inf (infinity) to use a single trajectory for the
+        entire ray.
+        Default: 0.
+    high_res_box_size_fraction : optional, float
+        For use with zoom-in simulations, use to specify the size of the
+        high resolution region of the simulation.  If set, the light ray
+        solution will be calculated such that rays only make use of the
+        high resolution region.
+        Default: 1.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -122,14 +126,21 @@
                  near_redshift=None, far_redshift=None,
                  use_minimum_datasets=True, max_box_fraction=1.0,
                  deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
-                 time_data=True, redshift_data=True,
+                 high_res_box_size_fraction=1.0, time_data=True, 
+                 redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
+        if near_redshift is not None and far_redshift is not None and \
+          near_redshift >= far_redshift:
+            raise RuntimeError(
+                "near_redshift must be less than far_redshift.")
+
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.use_minimum_datasets = use_minimum_datasets
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
+        self.high_res_box_size_fraction = high_res_box_size_fraction
         self.parameter_filename = parameter_filename
         if load_kwargs is None:
             self.load_kwargs = {}
@@ -171,20 +182,24 @@
             CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
-              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
-                                           minimal=self.use_minimum_datasets,
-                                           max_box_fraction=max_box_fraction,
-                                           deltaz_min=self.deltaz_min,
-                                           time_data=time_data,
-                                           redshift_data=redshift_data)
+              self.create_cosmology_splice(
+                  self.near_redshift, self.far_redshift,
+                  minimal=self.use_minimum_datasets,
+                  max_box_fraction=max_box_fraction,
+                  high_res_box_size_fraction=self.high_res_box_size_fraction,
+                  deltaz_min=self.deltaz_min,
+                  time_data=time_data,
+                  redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
+                                      left_edge=None, right_edge=None,
+                                      min_level=None, periodic=True,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        np.random.seed(seed)
+        my_random = np.random.RandomState(seed)
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
@@ -226,33 +241,42 @@
                 self.light_ray_solution[q]['traversal_box_fraction'] = \
                     self.cosmology.comoving_radial_distance(z_next, \
                         self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
-                        self.simulation.box_size
+                        self.simulation.box_size  
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
-                # enough of the box has been used,
-                # or if box_fraction_used will be greater than 1 after this slice.
-                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                        (box_fraction_used >
-                         self.minimum_coherent_box_fraction) or \
-                        (box_fraction_used +
-                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                    # Random start point
-                    self.light_ray_solution[q]['start'] = np.random.random(3)
-                    theta = np.pi * np.random.random()
-                    phi = 2 * np.pi * np.random.random()
-                    box_fraction_used = 0.0
+                # enough of the box has been used.
+                if (q == 0) or (box_fraction_used >=
+                                self.minimum_coherent_box_fraction):
+                    if periodic:
+                        self.light_ray_solution[q]['start'] = left_edge + \
+                          (right_edge - left_edge) * my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
+                        box_fraction_used = 0.0
+                    else:
+                        ds = load(self.light_ray_solution[q]["filename"])
+                        self.light_ray_solution[q]['start'], \
+                          self.light_ray_solution[q]['end'] = \
+                          non_periodic_ray(ds, left_edge, right_edge,
+                            self.light_ray_solution[q]['traversal_box_fraction'],
+                                           my_random=my_random, min_level=min_level)
+                        del ds
                 else:
-                    # Use end point of previous segment and same theta and phi.
+                    # Use end point of previous segment, adjusted for periodicity,
+                    # and the same trajectory.
                     self.light_ray_solution[q]['start'] = \
-                      self.light_ray_solution[q-1]['end'][:]
+                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
+                                      left=left_edge, right=right_edge)
 
-                self.light_ray_solution[q]['end'] = \
-                  self.light_ray_solution[q]['start'] + \
-                    self.light_ray_solution[q]['traversal_box_fraction'] * \
-                    np.array([np.cos(phi) * np.sin(theta),
-                              np.sin(phi) * np.sin(theta),
-                              np.cos(theta)])
+                if "end" not in self.light_ray_solution[q]:
+                    self.light_ray_solution[q]['end'] = \
+                      self.light_ray_solution[q]['start'] + \
+                        self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        self.simulation.box_size * \
+                        np.array([np.cos(phi) * np.sin(theta),
+                                  np.sin(phi) * np.sin(theta),
+                                  np.cos(theta)])
                 box_fraction_used += \
                   self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -263,15 +287,18 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None,
+    def make_light_ray(self, seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=None, use_peculiar_velocity=True, 
-                       redshift=None, njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True,
+                       redshift=None, field_parameters=None, njobs=-1):
         """
-        make_light_ray(seed=None, start_position=None, end_position=None,
+        make_light_ray(seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
+                       start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        use_peculiar_velocity=True, redshift=None,
@@ -287,6 +314,27 @@
         seed : optional, int
             Seed for the random number generator.
             Default: None.
+        periodic : optional, bool
+            If True, ray trajectories will make use of periodic
+            boundaries.  If False, ray trajectories will not be
+            periodic.
+            Default : True.
+        left_edge : optional, iterable of floats of YTArray
+            The left corner of the region in which rays are to be
+            generated.  If None, the left edge will be that of the
+            domain.
+            Default: None.
+        right_edge : optional, iterable of floats of YTArray
+            The right corner of the region in which rays are to be
+            generated.  If None, the right edge will be that of the
+            domain.
+            Default: None.
+        min_left : optional, int
+            The minimum refinement level of the spatial region in which
+            the ray passes.  This can be used with zoom-in simulations
+            where the high resolution region does not keep a constant
+            geometry.
+            Default: None.
         start_position : optional, iterable of floats or YTArray.
             Used only if creating a light ray from a single dataset.
             The coordinates of the starting position of the ray.
@@ -365,6 +413,22 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if self.simulation_type is None:
+            domain = self.ds
+        else:
+            domain = self.simulation
+
+        if left_edge is None:
+            left_edge = domain.domain_left_edge
+        elif not hasattr(left_edge, 'units'):
+            left_edge = domain.arr(left_edge, 'code_length')
+        left_edge.convert_to_units('unitary')
+
+        if right_edge is None:
+            right_edge = domain.domain_right_edge
+        elif not hasattr(right_edge, 'units'):
+            right_edge = domain.arr(right_edge, 'code_length')
+        right_edge.convert_to_units('unitary')
 
         if start_position is not None and hasattr(start_position, 'units'):
             start_position = start_position.to('unitary')
@@ -380,15 +444,22 @@
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
-            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
+            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
+                       "Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
+                                           left_edge=left_edge,
+                                           right_edge=right_edge,
+                                           min_level=min_level, periodic=periodic,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
 
+        if field_parameters is None:
+            field_parameters = {}
+
         # Initialize data structures.
         self._data = {}
         if fields is None: fields = []
@@ -428,11 +499,6 @@
             if start_position is not None:
                 my_segment["start"] = ds.arr(my_segment["start"], "unitary")
                 my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-            else:
-                my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                  ds.domain_left_edge
-                my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                  ds.domain_left_edge
 
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
@@ -461,8 +527,8 @@
 
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=ds.domain_left_edge,
-                                        right=ds.domain_right_edge)
+                                        left=left_edge,
+                                        right=right_edge)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -475,6 +541,8 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = ds.ray(sub_segment[0], sub_segment[1])
+                for key, val in field_parameters.items():
+                    sub_ray.set_field_parameter(key, val)
                 asort = np.argsort(sub_ray["t"])
                 sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,
@@ -671,6 +739,22 @@
 
     return np.sqrt(np.power((end - start), 2).sum())
 
+def periodic_adjust(p, left=None, right=None):
+    """
+    Return the point p adjusted for periodic boundaries.
+
+    """
+    if isinstance(p, YTArray):
+        p.convert_to_units("unitary")
+    if left is None:
+        left = np.zeros_like(p)
+    if right is None:
+        right = np.ones_like(p)
+
+    w = right - left
+    p -= left
+    return np.mod(p, w)
+
 def periodic_distance(coord1, coord2):
     """
     periodic_distance(coord1, coord2)
@@ -755,3 +839,39 @@
         t += dt
 
     return segments
+
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
+                     min_level=None, my_random=None):
+
+    max_length = vector_length(left_edge, right_edge)
+    if ray_length > max_length:
+        raise RuntimeError(
+            ("The maximum segment length in the region %s to %s is %s, " +
+             "but the ray length requested is %s.  Decrease ray length.") %
+             (left_edge, right_edge, max_length, ray_length))
+
+    if my_random is None:
+        my_random = np.random.RandomState()
+    i = 0
+    while True:
+        start = my_random.random_sample(3) * \
+          (right_edge - left_edge) + left_edge
+        theta = np.pi * my_random.random_sample()
+        phi = 2 * np.pi * my_random.random_sample()
+        end = start + ray_length * \
+          np.array([np.cos(phi) * np.sin(theta),
+                    np.sin(phi) * np.sin(theta),
+                    np.cos(theta)])
+        i += 1
+        test_ray = ds.ray(start, end.d)
+        if (end >= left_edge).all() and (end <= right_edge).all() and \
+          (min_level is None or min_level <= 0 or
+           (test_ray["grid_level"] >= min_level).all()):
+            mylog.info("Found ray after %d attempts." % i)
+            del test_ray
+            return start, end.d
+        del test_ray
+        if i > max_iter:
+            raise RuntimeError(
+                ("Failed to create segment in %d attempts.  " +
+                 "Decreasing ray length is recommended") % i)

diff -r bdc1201a3c1b9c07a269452d9b4dbeea8c4f0965 -r 6603b80f1bcbf498374b6c9c6388ea3447294408 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -110,6 +110,8 @@
         self.domain_right_edge = self.domain_right_edge * self.length_unit
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_length", self.length_unit)
+        self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                               self.length_unit.units.dimensions)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,


https://bitbucket.org/yt_analysis/yt/commits/fd06a8e1ab68/
Changeset:   fd06a8e1ab68
Branch:      yt
User:        brittonsmith
Date:        2016-08-01 10:04:08+00:00
Summary:     No longer need to convert to code length.
Affected #:  1 file

diff -r 6603b80f1bcbf498374b6c9c6388ea3447294408 -r fd06a8e1ab68da975ab2d41c23d7a7ec6c00a46f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -521,10 +521,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Convert segment units from unitary to code length for sub_ray
-            my_segment['start'] = my_segment['start'].to('code_length')
-            my_segment['end'] = my_segment['end'].to('code_length')
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
                                         left=left_edge,


https://bitbucket.org/yt_analysis/yt/commits/0c788498f6cf/
Changeset:   0c788498f6cf
Branch:      yt
User:        brittonsmith
Date:        2016-08-01 10:07:38+00:00
Summary:     Make sure high res region is smaller than box size.
Affected #:  1 file

diff -r fd06a8e1ab68da975ab2d41c23d7a7ec6c00a46f -r 0c788498f6cfc9f79667125173b5ab1800a143a8 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -105,7 +105,11 @@
         else:
             mylog.error('Both time_data and redshift_data are False.')
             return
-        
+
+        if high_res_box_size_fraction > 1.:
+            raise RuntimeError(
+                "high_res_box_size_fraction must be <= 1.")
+
         # Link datasets in list with pointers.
         # This is used for connecting datasets together.
         for i, output in enumerate(self.splice_outputs):


https://bitbucket.org/yt_analysis/yt/commits/89f69a6a2cf4/
Changeset:   89f69a6a2cf4
Branch:      yt
User:        brittonsmith
Date:        2016-08-04 12:18:21+00:00
Summary:     Adding unitary units for GadgetSimulation.
Affected #:  1 file

diff -r 0c788498f6cfc9f79667125173b5ab1800a143a8 -r 89f69a6a2cf413edd5590c2b17dcf35c0ccc8a94 yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -102,6 +102,8 @@
             self.box_size = self.box_size * self.length_unit
             self.domain_left_edge = self.domain_left_edge * self.length_unit
             self.domain_right_edge = self.domain_right_edge * self.length_unit
+            self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                                   self.length_unit.units.dimensions)
         else:
             # Read time from file for non-cosmological sim
             self.time_unit = self.quan(


https://bitbucket.org/yt_analysis/yt/commits/ec69690314e2/
Changeset:   ec69690314e2
Branch:      yt
User:        brittonsmith
Date:        2016-08-05 07:54:36+00:00
Summary:     Change wording of docstring and fix a typo.
Affected #:  2 files

diff -r 89f69a6a2cf413edd5590c2b17dcf35c0ccc8a94 -r ec69690314e27e953d2cf8afe2da452d3e1cda03 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -70,9 +70,10 @@
             one dataset to another.
             Default: 1.0 (the size of the box)
         high_res_box_size_fraction : float
-            In terms of the size of the domain, the size of the region that
-            will be used to calculate the redshift interval from one dataset
-            to another.  Must be <= 1.0.
+            The fraction of the total domain size that will be used to
+            calculate the redshift interval from one dataset
+            to another.  Use this when working with zoom-in simulations.
+            Must be <= 1.0.
             Default:  1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets

diff -r 89f69a6a2cf413edd5590c2b17dcf35c0ccc8a94 -r ec69690314e27e953d2cf8afe2da452d3e1cda03 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -96,9 +96,10 @@
         Default: 0.
     high_res_box_size_fraction : optional, float
         For use with zoom-in simulations, use to specify the size of the
-        high resolution region of the simulation.  If set, the light ray
-        solution will be calculated such that rays only make use of the
-        high resolution region.
+        high resolution region of the simulation in terms of the fraction
+        of the total domain size.  If set, the light ray solution will be
+        calculated such that rays only make use of the high resolution
+        region.
         Default: 1.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
@@ -329,7 +330,7 @@
             generated.  If None, the right edge will be that of the
             domain.
             Default: None.
-        min_left : optional, int
+        min_level : optional, int
             The minimum refinement level of the spatial region in which
             the ray passes.  This can be used with zoom-in simulations
             where the high resolution region does not keep a constant


https://bitbucket.org/yt_analysis/yt/commits/225d95187c8b/
Changeset:   225d95187c8b
Branch:      yt
User:        brittonsmith
Date:        2016-08-07 08:18:05+00:00
Summary:     Uncommenting use of high res box size fraction.
Affected #:  1 file

diff -r ec69690314e27e953d2cf8afe2da452d3e1cda03 -r 225d95187c8bdcb0616b9342c28c0016c85e8b50 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -110,6 +110,7 @@
         if high_res_box_size_fraction > 1.:
             raise RuntimeError(
                 "high_res_box_size_fraction must be <= 1.")
+        self.high_res_box_size_fraction = high_res_box_size_fraction
 
         # Link datasets in list with pointers.
         # This is used for connecting datasets together.
@@ -130,8 +131,6 @@
         # Calculate minimum delta z for each data dump.
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
-        self.high_res_box_size_fraction = high_res_box_size_fraction
-        
         cosmology_splice = []
  
         if near_redshift == far_redshift:
@@ -276,7 +275,8 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size*self.high_res_box_size_fraction)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
+                                              self.high_res_box_size_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -297,7 +297,8 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        target_distance = self.simulation.box_size #* self.high_res_box_size_fraction
+        target_distance = self.simulation.box_size * \
+          self.high_res_box_size_fraction
 
         for output in self.splice_outputs:
             z = output['redshift']


https://bitbucket.org/yt_analysis/yt/commits/fbf98d40089a/
Changeset:   fbf98d40089a
Branch:      yt
User:        brittonsmith
Date:        2016-08-12 11:28:07+00:00
Summary:     Make sure omega values in cosmology calculator are floats.
Affected #:  1 file

diff -r a46f10f9e09c7cdca438f19057a9ddd6f4361c31 -r fbf98d40089a9bd143deac0ec7ca548240978023 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -66,9 +66,9 @@
                  omega_curvature = 0.0,
                  unit_registry = None,
                  unit_system = "cgs"):
-        self.omega_matter = omega_matter
-        self.omega_lambda = omega_lambda
-        self.omega_curvature = omega_curvature
+        self.omega_matter = float(omega_matter)
+        self.omega_lambda = float(omega_lambda)
+        self.omega_curvature = float(omega_curvature)
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)


https://bitbucket.org/yt_analysis/yt/commits/6ba6ea28d12f/
Changeset:   6ba6ea28d12f
Branch:      yt
User:        brittonsmith
Date:        2016-08-12 11:29:15+00:00
Summary:     Don't pass unit_registry into cosmology calculator.
Affected #:  2 files

diff -r fbf98d40089a9bd143deac0ec7ca548240978023 -r 6ba6ea28d12f210dc506542d1b4de6cc8ad339ab yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -156,8 +156,7 @@
                 self.cosmology = Cosmology(
                     hubble_constant=self.ds.hubble_constant,
                     omega_matter=self.ds.omega_matter,
-                    omega_lambda=self.ds.omega_lambda,
-                    unit_registry=self.ds.unit_registry)
+                    omega_lambda=self.ds.omega_lambda)
             else:
                 redshift = 0.
             self.light_ray_solution.append({"filename": self.parameter_filename,

diff -r fbf98d40089a9bd143deac0ec7ca548240978023 -r 6ba6ea28d12f210dc506542d1b4de6cc8ad339ab yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -868,8 +868,7 @@
             self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
-                              omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry)
+                              omega_lambda=self.omega_lambda)
             self.critical_density = \
                     self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)


https://bitbucket.org/yt_analysis/yt/commits/8846ebeea82d/
Changeset:   8846ebeea82d
Branch:      yt
User:        brittonsmith
Date:        2016-08-12 11:29:55+00:00
Summary:     Make sure target distance is in cosmology calculator's unit system.
Affected #:  1 file

diff -r 6ba6ea28d12f210dc506542d1b4de6cc8ad339ab -r 8846ebeea82d5f784d34cdc81534fef6c205a1aa yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -363,6 +363,7 @@
         z2 = z1 - 0.1 # just an initial guess
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
         iteration = 1
 
         while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):


https://bitbucket.org/yt_analysis/yt/commits/9554274f49dd/
Changeset:   9554274f49dd
Branch:      yt
User:        brittonsmith
Date:        2016-08-12 12:40:12+00:00
Summary:     Refactoring to use a single function call.
Affected #:  1 file

diff -r 8846ebeea82d5f784d34cdc81534fef6c205a1aa -r 9554274f49ddfa92a0fce4ed63da770fc1f34adf yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -282,72 +282,22 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
             
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance


https://bitbucket.org/yt_analysis/yt/commits/e5c089aba243/
Changeset:   e5c089aba243
Branch:      yt
User:        brittonsmith
Date:        2016-08-12 12:49:20+00:00
Summary:     Use Hubble's Law for the initial guess in redshift calculation.
Affected #:  1 file

diff -r 9554274f49ddfa92a0fce4ed63da770fc1f34adf -r e5c089aba24378f759ed726b75f1f2b9d3fce0bc yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -307,13 +309,14 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
-        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
         iteration = 1
 
         while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):


https://bitbucket.org/yt_analysis/yt/commits/9dc2c5a7bf5e/
Changeset:   9dc2c5a7bf5e
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 13:39:06+00:00
Summary:     Adding warning about reference frames to cosmology docs.
Affected #:  2 files

diff -r e5c089aba24378f759ed726b75f1f2b9d3fce0bc -r 9dc2c5a7bf5e71a4b24bfd422099139643c8cf0d doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
   # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for some distance value,
+   x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+   care to understand which reference frame is correct for the given calculation.
+
+All of the above
 functions accept scalar values and arrays.  The helper functions, `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage

diff -r e5c089aba24378f759ed726b75f1f2b9d3fce0bc -r 9dc2c5a7bf5e71a4b24bfd422099139643c8cf0d yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -33,7 +33,14 @@
 
     For an explanation of the various cosmological measures, see, for example 
     Hogg (1999, http://xxx.lanl.gov/abs/astro-ph/9905116).
-    
+
+    WARNING: Cosmological distance calculations return values that are either
+    in the comoving or proper frame, depending on the specific quantity.  For
+    simplicity, the proper and comoving frames are set equal to each other
+    within the cosmology calculator.  This means that for some distance value,
+    x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+    care to understand which reference frame is correct for the given calculation.
+
     Parameters
     ----------
     hubble_constant : float
@@ -58,7 +65,7 @@
     >>> from yt.utilities.cosmology import Cosmology
     >>> co = Cosmology()
     >>> print(co.hubble_time(0.0).in_units("Gyr"))
-    
+
     """
     def __init__(self, hubble_constant = 0.71,
                  omega_matter = 0.27,


https://bitbucket.org/yt_analysis/yt/commits/4f8268143c6d/
Changeset:   4f8268143c6d
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 13:52:06+00:00
Summary:     Adding new kwargs to narrative docs.
Affected #:  2 files

diff -r 225d95187c8bdcb0616b9342c28c0016c85e8b50 -r 4f8268143c6d469313cbc840e206e332c1b0e90b doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,13 +49,17 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with
-  ``use_minimum_datasets`` set to False, this parameter specifies the
-  fraction of the total box size to be traversed before rerandomizing the
-  projection axis and center.  This was invented to allow light rays with
-  thin slices to sample coherent large scale structure, but in practice
-  does not work so well.  Try setting this parameter to 1 and see what
-  happens.  Default: 0.0.
+* ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum
+  length of a ray, in terms of the size of the domain, before the trajectory
+  is re-randomized.  Set to 0 to have ray trajectory randomized for every
+  dataset.  Set to np.inf (infinity) to use a single trajectory for the
+  entire ray.  Default: 0.0.
+
+* ``high_res_box_size_fraction`` (*float*): For use with zoom-in simulations,
+  use to specify the size of the high resolution region of the simulation in
+  terms of the fraction of the total domain size.  If set, the light ray
+  solution will be calculated such that rays only make use of the high
+  resolution region.  Default: 1.0.
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
@@ -85,6 +89,21 @@
 
 * ``seed`` (*int*): Seed for the random number generator.  Default: None.
 
+* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
+  boundaries.  If False, ray trajectories will not be periodic.  Default : True.
+
+* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
+  region in which rays are to be generated.  If None, the left edge will be
+  that of the domain.  Default: None.
+
+* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
+  the region in which rays are to be generated.  If None, the right edge
+  will be that of the domain.  Default: None.
+
+* ``min_level`` (*int*): The minimum refinement level of the spatial region in
+  which the ray passes.  This can be used with zoom-in simulations where the
+  high resolution region does not keep a constant geometry.  Default: None.
+
 * ``start_position`` (*list* of floats): Used only if creating a light ray
   from a single dataset.  The coordinates of the starting position of the
   ray.  Default: None.

diff -r 225d95187c8bdcb0616b9342c28c0016c85e8b50 -r 4f8268143c6d469313cbc840e206e332c1b0e90b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -320,12 +320,12 @@
             boundaries.  If False, ray trajectories will not be
             periodic.
             Default : True.
-        left_edge : optional, iterable of floats of YTArray
+        left_edge : optional, iterable of floats or YTArray
             The left corner of the region in which rays are to be
             generated.  If None, the left edge will be that of the
             domain.
             Default: None.
-        right_edge : optional, iterable of floats of YTArray
+        right_edge : optional, iterable of floats or YTArray
             The right corner of the region in which rays are to be
             generated.  If None, the right edge will be that of the
             domain.


https://bitbucket.org/yt_analysis/yt/commits/af018407cfa4/
Changeset:   af018407cfa4
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 14:45:50+00:00
Summary:     Adding some LightRay tips.
Affected #:  2 files

diff -r 4f8268143c6d469313cbc840e206e332c1b0e90b -r af018407cfa4adccc2ebd84623b8e0cf027fccb3 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,6 +49,10 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
+* ``max_box_fraction`` (*float*):  In terms of the size of the domain, the
+  maximum length a light ray segment can be in order to span the redshift interval
+  from one dataset to another.  Default: 1.0 (the size of the box)
+
 * ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum
   length of a ray, in terms of the size of the domain, before the trajectory
   is re-randomized.  Set to 0 to have ray trajectory randomized for every
@@ -71,7 +75,7 @@
 ---------------------
 
 Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay,make_light_ray`
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
 function will trace out the rays in each dataset and collect information for all the
 fields requested.  The output file will be an HDF5 file containing all the
 cell field values for all the cells that were intersected by the ray.  A
@@ -141,7 +145,74 @@
   slice and 1 to have all processors work together on each projection.
   Default: 1
 
-.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
+Useful Tips for Making LightRays
+--------------------------------
+
+Below are some tips that may come in handy for creating proper LightRays.
+
+How many snapshots do I need?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of snapshots required to traverse some redshift interval depends
+on the simulation box size and cosmological parameters.  Before running an
+expensive simulation only to find out that you don't have enough outputs
+to span the redshift interval you want, have a look at
+:ref:`planning-cosmology-simulations`.  The functionality described there
+will allow you to calculate the precise number of snapshots and specific
+redshifts at which they should be written.
+
+My snapshots are too far apart!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
+allows the user to control how long a ray segment can be for an
+individual dataset.  By default, the `LightRay` generator will try to
+make segments no longer than the size of the box to avoid sampling the
+same structures more than once.  However, this can be increased in the
+case that the redshift interval between datasets is longer than the
+box size.  Increasing this value should be done with caution as longer
+ray segments run a greater risk of coming back to somewhere near their
+original position.
+
+What if I have a zoom-in simulation?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A zoom-in simulation has a high resolution region embedded within a
+larger, low resolution volume.  In this type of simulation, it is likely
+that you will want the ray segments to stay within the high resolution
+region.  To do this, you must first specify the size of the high
+resolution region when creating the `LightRay` using the
+``high_res_box_size_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+the high resolution region and not the full box size.  Then, in the call to
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
+use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
+precise location of the high resolution region.
+
+Technically speaking, the ray segments should no longer be periodic
+since the high resolution region is only a sub-volume within the
+larger domain.  To make the ray segments non-periodic, set the
+``periodic`` keyword to False.  The LightRay generator will continue
+to generate randomly oriented segments until it finds one that fits
+entirely within the high resolution region.  If you have a high
+resolution region that can move and change shape slightly as structure
+forms, use the `min_level` keyword to mandate that the ray segment only
+pass through cells that are refined to at least some minimum level.
+
+If the size of the high resolution region is not large enough to
+span the required redshift interval, the `LightRay` generator can
+be configured to treat the high resolution region as if it were
+periodic simply by setting the ``periodic`` keyword to True.  This
+option should be used with caution as it will lead to the creation
+of disconnected ray segments within a single dataset.
+
+.. note::
+
+   As of :code:`yt-3.0`, the functionality for recording properties of
+   the nearest halo to each element of the ray no longer exists.  This
+   is still available in :code:`yt-2.x`.  If you would like to use this
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

diff -r 4f8268143c6d469313cbc840e206e332c1b0e90b -r af018407cfa4adccc2ebd84623b8e0cf027fccb3 doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -4,7 +4,7 @@
 ===================================================
 
 If you want to run a cosmological simulation that will have just enough data
-outputs to create a cosmology splice, the
+outputs to create a light cone or light ray, the
 :meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
 function will calculate a list of redshift outputs that will minimally
 connect a redshift interval.


https://bitbucket.org/yt_analysis/yt/commits/25bc094aa3fd/
Changeset:   25bc094aa3fd
Branch:      yt
User:        brittonsmith
Date:        2016-08-15 14:48:55+00:00
Summary:     One more tip.
Affected #:  1 file

diff -r af018407cfa4adccc2ebd84623b8e0cf027fccb3 -r 25bc094aa3fd324a456cb43ab4e3641e63bf9828 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -206,6 +206,12 @@
 option should be used with caution as it will lead to the creation
 of disconnected ray segments within a single dataset.
 
+I want a continuous trajectory over the entire ray.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set the ``minimum_coherent_box_fraction`` keyword argument to a very
+large number, like infinity (`numpy.inf`).
+
 .. note::
 
    As of :code:`yt-3.0`, the functionality for recording properties of


https://bitbucket.org/yt_analysis/yt/commits/f92867811908/
Changeset:   f92867811908
Branch:      yt
User:        brittonsmith
Date:        2016-08-17 15:31:54+00:00
Summary:     Typo.
Affected #:  1 file

diff -r 25bc094aa3fd324a456cb43ab4e3641e63bf9828 -r f928678119086ca03c39b77425c884614cd1aa15 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -183,7 +183,7 @@
 region.  To do this, you must first specify the size of the high
 resolution region when creating the `LightRay` using the
 ``high_res_box_size_fraction`` keyword.  This will make sure that
-the calculation of the spacing of the segment datasets only takes into
+the calculation of the spacing of the segment datasets only takes into account
 the high resolution region and not the full box size.  Then, in the call to
 :func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
 use the ``left_edge`` and ``right_edge`` keyword arguments to specify the


https://bitbucket.org/yt_analysis/yt/commits/4733bb8fed39/
Changeset:   4733bb8fed39
Branch:      yt
User:        brittonsmith
Date:        2016-08-19 10:45:54+00:00
Summary:     Merging.
Affected #:  58 files

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        ind = next((i for i, val in enumerate(data)
+                    if re.match('\s{0,3}\{.*\}\s*$', val)))
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -116,7 +116,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
    # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for some distance value,
+   x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+   care to understand which reference frame is correct for the given calculation.
+
+All of the above
 functions accept scalar values and arrays.  The helper functions, `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, add the config parameter ``test_data_dir`` pointing to the
+directory with the test data you want to test with, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -388,10 +388,10 @@
 To make things easier to load these sample datasets, you can add the parent
 directory to your downloaded sample data to your *yt path*.
 If you set the option ``test_data_dir``, in the section ``[yt]``,
-in ``~/.yt/config``, yt will search this path for them.
+in ``~/.config/yt/ytrc``, yt will search this path for them.
 
 This means you can download these datasets to ``/big_drive/data_for_yt`` , add
-the appropriate item to ``~/.yt/config``, and no matter which directory you are
+the appropriate item to ``~/.config/yt/ytrc``, and no matter which directory you are
 in when running yt, it will also check in *that* directory.
 
 
@@ -437,12 +437,11 @@
 hand, you may want it to output a lot more, since you can't figure out exactly what's going
 wrong, and you want to output some debugging information. The yt log level can be
 changed using the :ref:`configuration-file`, either by setting it in the
-``$HOME/.yt/config`` file:
+``$HOME/.config/yt/ytrc`` file:
 
 .. code-block:: bash
 
-   [yt]
-   loglevel = 10 # This sets the log level to "DEBUG"
+   $ yt config set yt loglevel 10  # This sets the log level to "DEBUG"
 
 which would produce debug (as well as info, warning, and error) messages, or at runtime:
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -133,6 +133,16 @@
      <tr valign="top"><td width="25%"><p>
+           <a href="sharing_data.html">Sharing Data</a>
+         </p>
+       </td>
+       <td width="75%">
+         <p class="linkdescr">The yt Hub</p>
+       </td>
+     </tr>
+     <tr valign="top">
+       <td width="25%">
+         <p><a href="reference/index.html">Reference Materials</a></p></td>
@@ -185,6 +195,7 @@
    analyzing/analysis_modules/index
    examining/index
    developing/index
+   sharing_data
    reference/index
    faq/index
    Getting Help <help/index>

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -54,35 +54,7 @@
 
 This will print the list of available subcommands,
 
-.. code-block:: bash
-
-    help                Print help message
-    bootstrap_dev       Bootstrap a yt development environment
-    bugreport           Report a bug in yt
-    hub_register        Register a user on the Hub: http://hub.yt-project.org/
-    hub_submit          Submit a mercurial repository to the yt Hub
-                        (http://hub.yt-project.org/), creating a BitBucket
-                        repo in the process if necessary.
-    instinfo            Get some information about the yt installation
-    version             Get some information about the yt installation (this
-                        is an alias for instinfo).
-    load                Load a single dataset into an IPython instance
-    mapserver           Serve a plot in a GMaps-style interface
-    pastebin            Post a script to an anonymous pastebin
-    pastebin_grab       Print an online pastebin to STDOUT for local use.
-    upload_notebook     Upload an IPython notebook to hub.yt-project.org.
-    plot                Create a set of images
-    rpdb                Connect to a currently running (on localhost) rpd
-                        session. Commands run with --rpdb will trigger an rpdb
-                        session with any uncaught exceptions.
-    notebook            Run the IPython Notebook
-    stats               Print stats and max/min value of a given field (if
-                        requested), for one or more datasets (default field is
-                        Density)
-    update              Update the yt installation to the most recent version
-    delete_image        Delete image from imgur.com.
-    upload_image        Upload an image to imgur.com. Must be PNG.
-
+.. config_help:: yt
 
 To execute any such function, simply run:
 
@@ -217,13 +189,12 @@
 
 This command will accept the filename of a ``.ipynb`` file (generated from an
 IPython notebook session) and upload it to the `yt hub
-<http://hub.yt-project.org/>` where others will be able to view it, and
+<https://hub.yt/>`__ where others will be able to view it, and
 download it.  This is an easy method for recording a sequence of commands,
 their output, narrative information, and then sharing that with others.  These
 notebooks will be viewable online, and the appropriate URLs will be returned on
 the command line.
 
-
 rpdb
 ++++
 
@@ -272,3 +243,95 @@
 The image uploaded using ``upload_image`` is assigned with a unique hash that
 can be used to remove it. This subcommand provides an easy way to send a delete
 request directly to the `imgur.com <http://imgur.com/>`_.
+
+Hub helper
+~~~~~~~~~~
+
+The :code:`yt hub` command-line tool allows you to interact with the `yt hub
+<https://hub.yt>`__. The following subcommands are currently available:
+
+.. config_help:: yt hub
+
+register
+++++++++
+
+This subcommand starts an interactive process of creating an account on the `yt
+hub <https://hub.yt/>`__. Please note that the yt Hub also supports multiple OAuth
+providers such as Google, Bitbucket and GitHub for authentication. 
+See :ref:`hub-APIkey` for more information.
+
+start
++++++
+
+This subcommand launches the Jupyter Notebook on the `yt Hub <https://hub.yt>`__
+with a chosen Hub folder mounted to the ``/data`` directory inside the notebook.
+If no path is given all the `example yt datasets
+<https://yt-project.org/data>`_ are mounted by default. The appropriate URL
+allowing access to the Notebook will be returned on the command line.
+
+Example:
+
+.. code-block:: bash
+
+   $ yt hub start
+   $ yt hub start /user/xarthisius/Public
+
+
+Config helper
+~~~~~~~~~~~~~
+
+The :code:`yt config` command-line tool allows you to modify and access yt's
+configuration without manually locating and opening the config file in an editor.
+To get a quick list of available commands, just type:
+
+.. code-block:: bash
+
+   yt config -h
+
+This will print the list of available subcommands:
+
+.. config_help:: yt config
+
+Since the yt version 3.3.2, the previous location of the configuration file
+(``$HOME/.yt/config``) has been deprecated in favor of a location adhering to the
+`XDG Base Directory Specification
+<https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
+(``$XDG_HOME_CONFIG/yt/ytrc``). In order to perform an automatic migration of
+the old config, you are encouraged to run:
+
+.. code-block:: bash
+
+   yt config migrate
+
+that will copy your current config file to the new location and store a backup
+copy as ``$HOME/.yt/config.bak``.
+
+Examples
+++++++++
+
+Listing current content of the config file:
+
+.. code-block:: bash
+
+   $ yt config list
+   [yt]
+   loglevel = 50
+
+Obtaining a single config value by name:
+
+.. code-block:: bash
+
+   $ yt config get yt loglevel
+   50
+
+Changing a single config value:
+
+.. code-block:: bash
+
+   $ yt config set yt loglevel 10
+
+Removing a single config entry:
+
+.. code-block:: bash
+
+   $ yt config rm yt loglevel

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -18,9 +18,9 @@
 Configuration File Format
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/config`` as a configuration
+yt will look for and recognize the file ``$HOME/.config/yt/ytrc`` as a configuration
 file, containing several options that can be modified and adjusted to control
-runtime behavior.  For example, a sample ``$HOME/.yt/config`` file could look
+runtime behavior.  For example, a sample ``$HOME/.config/yt/ytrc`` file could look
 like:
 
 .. code-block:: none
@@ -31,7 +31,17 @@
 
 This configuration file would set the logging threshold much lower, enabling
 much more voluminous output from yt.  Additionally, it increases the number of
-datasets tracked between instantiations of yt.
+datasets tracked between instantiations of yt. The configuration file can be
+managed using the ``yt config`` helper. It can list, add, modify and remove
+options from the configuration file, e.g.:
+
+.. code-block:: none
+
+   $ yt config -h
+   $ yt config list
+   $ yt config set yt loglevel 1
+   $ yt config rm yt maximumstoreddatasets
+
 
 Configuration Options At Runtime
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/sharing_data.rst
--- /dev/null
+++ b/doc/source/sharing_data.rst
@@ -0,0 +1,117 @@
+.. _sharing-data:
+
+The yt Hub
+==========
+
+.. contents::
+   :depth: 2
+   :local:
+   :backlinks: none
+
+What is the yt Hub?
+-------------------
+
+The yt data Hub is a mechanism by which images, data objects and projects can be
+shared with other people. For instance, one can upload a dataset and allow other
+people to remotely analyze it with a jupyter notebook or upload notebooks and
+view them from any web browser.
+
+.. note:: All items posted on the hub are public!
+
+Over time, more widgets will be added, and more datatypes will be able to be
+uploaded.  If you are interested in adding more ways of sharing data, please
+email the developers' list.  We would like to add support for 3D widgets such
+as isocontours as well as interactive binning and rebinning of data from yt
+data objects, to be displayed as phase plots and profiles.
+
+.. note:: Working with the Hub requires additional dependencies to be installed.
+          You can obtain them by running: ``pip install yt[hub]``. 
+
+.. _hub-APIkey:
+
+Obtaining an API key
+--------------------
+
+In order to interact with the yt Hub, you need to obtain an API key, which is
+available only for authenticated users. You can `log into
+<https://girder.hub.yt/#?dialog=login>`_ the Hub using your Google, GitHub or
+Bitbucket account. After you log in, an API key can be generated under the *My
+account* page, which can be accessed through the dropdown menu in the upper
+right corner. 
+
+.. image:: _static/apiKey01.jpg
+   :width: 50 %
+
+Select the *API keys* tab and press *Create new key* button:
+
+.. image:: _static/apiKey02.jpg
+   :width: 50 %
+
+By convention, the *Name* field of API keys can be used to specify what
+application is making use of the key in a human-readable way e.g. ``yt
+command``, although you may name your key however you want.
+
+.. image:: _static/apiKey03.jpg
+   :width: 50 %
+
+After the API Key is created you can obtain it by clicking *show* link:
+
+.. image:: _static/apiKey04.jpg
+   :width: 50 %
+
+For more information about API keys please see `this document
+<http://girder.readthedocs.io/en/latest/user-guide.html?highlight=API%20keys#api-keys>`__.
+
+After you have gotten your API key, update your config file:
+
+.. code-block:: none
+
+   $ yt config set yt hub_api_key 3fd1de56c2114c13a2de4dd51g10974b
+
+Replace ``3fd1de56c2114c13a2de4dd51g10974b`` with your API key.
+
+Registering a User
+^^^^^^^^^^^^^^^^^^
+
+If you do not wish to use OAuth authentication, you can create a Hub account
+using ``yt`` command. To register a user:
+
+.. code-block:: bash
+
+   $ yt hub register
+
+This will walk you through the process of registering. You will need to supply
+a name, a username, a password and an email address. Apart from creating a new
+user account, it will also generate an API key and append it to the yt's config
+file.  At this point, you're ready to go!
+
+What Can Be Uploaded
+--------------------
+
+Currently, the yt hub can accept these types of data:
+
+ * Raw data files, scripts.
+ * IPython notebooks: these are stored on the hub and are made available for
+   download and via the IPython `nbviewer <http://nbviewer.ipython.org/>`_
+   service.
+
+How to Upload Data
+------------------
+
+Uploading data can be performed using the ``girder-cli`` command tool or
+directly via the web interface. Please refer to ``girder-cli`` `documentation page
+<http://girder.readthedocs.io/en/latest/python-client.html>`_ for additional
+information.
+
+Uploading Notebooks
+^^^^^^^^^^^^^^^^^^^
+
+Notebooks can be uploaded from the bash command line:
+
+.. code-block:: bash
+
+   yt upload_notebook notebook_file.ipynb
+
+After the notebook is finished uploading, yt will print a link to the raw
+notebook as well as an nbviewer link to the same notebook.  Your notebooks will
+be stored under your hub Public directory.

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -278,17 +278,17 @@
 Overplot Cell Edges
 ~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_cell_edges(line_width=1.0, alpha = 1.0,
-                                  color = (0.0, 0.0, 0.0))
+.. function:: annotate_cell_edges(line_width=0.002, alpha=1.0, color='black')
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.CellEdgesCallback`.)
 
-    Annotate the edges of cells, where the ``line_width`` in pixels is specified.
-    The ``alpha`` of the overlaid image and the ``color`` of the lines are also
-    specifiable.  Note that because the lines are drawn from both sides of a
-    cell, the image sometimes has the effect of doubling the line width.
-    Color here is in RGB float values (0 to 1).
+    Annotate the edges of cells, where the ``line_width`` relative to size of
+    the longest plot axis is specified.  The ``alpha`` of the overlaid image and
+    the ``color`` of the lines are also specifiable.  Note that because the
+    lines are drawn from both sides of a cell, the image sometimes has the
+    effect of doubling the line width.  Color here is a matplotlib color name or
+    a 3-tuple of RGB float values.
 
 .. python-script::
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -17,6 +17,24 @@
 plots of slices, projections, 1D profiles, and 2D profiles (phase plots), all of
 which are described below.
 
+.. _viewing-plots:
+
+Viewing Plots
+-------------
+
+YT uses an environment neutral plotting mechanism that detects the appropriate
+matplotlib configuration for a given environment, however it defaults to a basic
+renderer. To utilize interactive plots in matplotlib supported
+environments (Qt, GTK, WX, etc.) simply call the ``toggle_interactivity()`` function. Below is an
+example in a jupyter notebook environment, but the same command should work
+in other environments as well:
+
+.. code-block:: python
+ 
+   %matplotlib notebook
+   import yt
+   yt.toggle_interactivity()
+
 .. _simple-inspection:
 
 Slices & Projections
@@ -519,6 +537,27 @@
    slc.set_center((0.5, 0.503))
    slc.save()
 
+Flipping the plot view axes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+By default, all :class:`~yt.visualization.plot_window.PlotWindow` objects plot
+with the assumption that the eastern direction on the plot forms a right handed
+coordinate system with the ``normal`` and ``north_vector`` for the system, whether
+explicitly or implicitly defined. This setting can be toggled or explicitly defined
+by the user at initialization:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   #slicing with non right-handed coordinates
+   slc = yt.SlicePlot(ds, 'x', 'velocity_x', right_handed=False)
+   slc.annotate_title('Not Right Handed')
+   slc.save("NotRightHanded.png")
+
+   #switching to right-handed coordinates
+   slc.toggle_right_handed()
+   slc.annotate_title('Right Handed')
+   slc.save("Standard.png")
 
 .. _hiding-colorbar-and-axes:
 
@@ -686,6 +725,7 @@
    slc.set_cbar_minorticks('all', 'off')
    slc.save()
 
+
 .. _matplotlib-customization:
 
 Further customization via matplotlib

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -105,7 +105,7 @@
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your
-``~/.yt/config`` file under the heading ``[yt]`` as the variable
+``~/.config/yt/ytrc`` file under the heading ``[yt]`` as the variable
 ``sketchfab_api_key``.  If you don't want to do this, you can also supply it as
 an argument to the function ``export_sketchfab``.
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -118,7 +118,7 @@
     from yt.visualization.api import Streamlines
 
     ds = yt.load('DD1701') # Load ds
-    streamlines = Streamlines(ds, [0.5]*3)
+    streamlines = Streamlines(ds, ds.domain_center)
     streamlines.integrate_through_volume()
     stream = streamlines.path(0)
     matplotlib.pylab.semilogy(stream['t'], stream['density'], '-x')

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b setup.py
--- a/setup.py
+++ b/setup.py
@@ -380,6 +380,9 @@
         'IPython',
         'cython',
     ],
+    extras_require = {
+        'hub':  ["girder_client"]
+    },
     cmdclass={'sdist': sdist, 'build_ext': build_ext, 'build_py': build_py},
     author="The yt project",
     author_email="yt-dev at lists.spacepope.org",

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_001:
+  local_pw_006:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -73,6 +73,9 @@
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
 
+  local_axialpix_001:
+    - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
+
 other_tests:
   unittests:
      - '-v'

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -93,7 +93,8 @@
     parallel_profile, \
     enable_plugins, \
     memory_checker, \
-    deprecated_class
+    deprecated_class, \
+    toggle_interactivity
 from yt.utilities.logger import ytLogger as mylog
 
 import yt.utilities.physical_constants as physical_constants

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -45,12 +45,12 @@
        lower wavelength bound in angstroms.
     lambda_max : float
        upper wavelength bound in angstroms.
-    n_lambda : float
+    n_lambda : int
        number of wavelength bins.
     """
 
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        self.n_lambda = n_lambda
+        self.n_lambda = int(n_lambda)
         # lambda, flux, and tau are wavelength, flux, and optical depth
         self.lambda_min = lambda_min
         self.lambda_max = lambda_max
@@ -301,7 +301,7 @@
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
+            pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
@@ -550,8 +550,9 @@
         """
         mylog.info("Writing spectrum to fits file: %s.", filename)
         col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
-        col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
-        cols = pyfits.ColDefs([col1, col2])
+        col2 = pyfits.Column(name='tau', format='E', array=self.tau_field)
+        col3 = pyfits.Column(name='flux', format='E', array=self.flux_field)
+        cols = pyfits.ColDefs([col1, col2, col3])
         tbhdu = pyfits.BinTableHDU.from_columns(cols)
         tbhdu.writeto(filename, clobber=True)
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -294,73 +296,23 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size * \
           self.high_res_box_size_fraction
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
             
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -370,10 +322,12 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -168,8 +168,7 @@
                 self.cosmology = Cosmology(
                     hubble_constant=self.ds.hubble_constant,
                     omega_matter=self.ds.omega_matter,
-                    omega_lambda=self.ds.omega_lambda,
-                    unit_registry=self.ds.unit_registry)
+                    omega_lambda=self.ds.omega_lambda)
             else:
                 redshift = 0.
             self.light_ray_solution.append({"filename": self.parameter_filename,

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -85,7 +85,7 @@
         if self.children is None: return
         for child in self.children:
             child.add_validator(validator)
-        
+
     def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,6 +16,7 @@
 #-----------------------------------------------------------------------------
 
 import os
+import warnings
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(
@@ -48,8 +49,9 @@
     test_storage_dir = '/does/not/exist',
     test_data_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://hub.yt-project.org/upload',
+    hub_url = 'https://girder.hub.yt/api/v1',
     hub_api_key = '',
+    hub_sandbox = '/collection/yt_sandbox/data',
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
@@ -67,20 +69,28 @@
     default_colormap = 'arbre',
     ray_tracing_engine = 'embree',
     )
+
+CONFIG_DIR = os.environ.get(
+    'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config', 'yt'))
+if not os.path.exists(CONFIG_DIR):
+    os.makedirs(CONFIG_DIR)
+
+CURRENT_CONFIG_FILE = os.path.join(CONFIG_DIR, 'ytrc')
+_OLD_CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.yt', 'config')
+
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
 # without them.
 
-__fn = os.path.expanduser("~/.yt/config")
-if os.path.exists(__fn):
-    f = open(__fn).read()
+if os.path.exists(_OLD_CONFIG_FILE):
+    f = open(_OLD_CONFIG_FILE).read()
     if any(header in f for header in ["[lagos]","[raven]","[fido]","[enki]"]):
         print("***********************************************************")
         print("* Upgrading configuration file to new format; saving old. *")
         print("***********************************************************")
         # This is of the old format
         cp = configparser.ConfigParser()
-        cp.read(__fn)
+        cp.read(_OLD_CONFIG_FILE)
         # NOTE: To avoid having the 'DEFAULT' section here,
         # we are not passing in ytcfg_defaults to the constructor.
         new_cp = configparser.ConfigParser()
@@ -91,16 +101,21 @@
                 if option.lower() in ytcfg_defaults:
                     new_cp.set("yt", option, cp.get(section, option))
                     print("Setting %s to %s" % (option, cp.get(section, option)))
-        open(__fn + ".old", "w").write(f)
-        new_cp.write(open(__fn, "w"))
-# Pathological check for Kraken
-#elif os.path.exists("~/"):
-#    if not os.path.exists("~/.yt"):
-#            print "yt is creating a new directory, ~/.yt ."
-#            os.mkdir(os.path.exists("~/.yt/"))
-#    # Now we can read in and write out ...
-#    new_cp = configparser.ConfigParser(ytcfg_defaults)
-#    new_cp.write(__fn)
+        open(_OLD_CONFIG_FILE + ".old", "w").write(f)
+        new_cp.write(open(_OLD_CONFIG_FILE, "w"))
+
+    msg = (
+        "The configuration file {} is deprecated. "
+        "Please migrate your config to {} by running: "
+        "'yt config migrate'"
+    )
+    warnings.warn(msg.format(_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE))
+
+if not os.path.exists(CURRENT_CONFIG_FILE):
+    cp = configparser.ConfigParser()
+    cp.add_section("yt")
+    with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
+        cp.write(new_cfg)
 
 class YTConfigParser(configparser.ConfigParser):
     def __setitem__(self, key, val):
@@ -108,12 +123,8 @@
     def __getitem__(self, key):
         self.get(key[0], key[1])
 
-if os.path.exists(os.path.expanduser("~/.yt/config")):
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg', os.path.expanduser('~/.yt/config')])
-else:
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg'])
+ytcfg = YTConfigParser(ytcfg_defaults)
+ytcfg.read([_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE, 'yt.cfg'])
 if not ytcfg.has_section("yt"):
     ytcfg.add_section("yt")
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -127,4 +127,3 @@
 
     return simulation_time_series_registry[simulation_type](parameter_filename,
                                                             find_outputs=find_outputs)
-

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -868,8 +868,7 @@
             self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
-                              omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry)
+                              omega_lambda=self.omega_lambda)
             self.critical_density = \
                     self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/frontends/rockstar/definitions.py
--- a/yt/frontends/rockstar/definitions.py
+++ b/yt/frontends/rockstar/definitions.py
@@ -38,7 +38,7 @@
 # Note the final field here, which is a field for min/max format revision in
 # which the field appears.
 
-KNOWN_REVISIONS=[0, 1]
+KNOWN_REVISIONS=[0, 1, 2]
 
 halo_dt = [
     ('particle_identifier', np.int64),
@@ -101,6 +101,12 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
+    ('type', np.int32, (2, 100)),
+    ('sm', np.float32, (2, 100)),
+    ('gas', np.float32, (2, 100)),
+    ('bh', np.float32, (2, 100)),
+    ('peak_density', np.float32, (2, 100)),
+    ('av_density', np.float32, (2, 100)),
 ]
 
 halo_dts = {}

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -668,7 +668,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+        grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
 
     if length_unit is None:
         length_unit = 'code_length'

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,7 @@
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
+        ("metallicity", ("Zsun", ["metallicity"], None)),
 
         # We need to have a bunch of species fields here, too
         ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -16,7 +16,7 @@
 
 import errno
 from yt.extern.six import string_types
-from yt.extern.six.moves import input
+from yt.extern.six.moves import input, builtins
 import time
 import inspect
 import traceback
@@ -986,3 +986,21 @@
     except ImportError:
         pass
     return dummy_context_manager()
+
+interactivity = False
+
+"""Sets the condition that interactive backends can be used."""
+def toggle_interactivity():
+    global interactivity
+    interactivity = not interactivity
+    if interactivity is True:
+        if '__IPYTHON__' in dir(builtins):
+            import IPython
+            shell = IPython.get_ipython()
+            shell.magic('matplotlib')
+        else:
+            import matplotlib
+            matplotlib.interactive(True)
+
+def get_interactivity():
+    return interactivity

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -113,7 +113,7 @@
             # re-order the array and squeeze out the dummy dim
             return np.squeeze(np.transpose(img, (yax, xax, ax)))
 
-        elif dimension < 3:
+        elif self.axis_id.get(dimension, dimension) < 3:
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias, dimension, periodic)
         else:

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/geometry/coordinates/tests/test_axial_pixelization.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_axial_pixelization.py
@@ -0,0 +1,9 @@
+from yt.testing import \
+    fake_amr_ds, _geom_transforms
+from yt.utilities.answer_testing.framework import \
+    AxialPixelizationTest
+
+def test_axial_pixelization():
+    for geom in sorted(_geom_transforms):
+        ds = fake_amr_ds(geometry=geom)
+        yield AxialPixelizationTest(ds)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -480,6 +480,9 @@
     test_unit = Unit('m_geom/l_geom**3')
     assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
 
+    test_unit = Unit('1e9*cm')
+    assert_equal(test_unit.latex_repr, '1.0 \\times 10^{9}\\ \\rm{cm}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -926,12 +926,12 @@
     yt_quan2 = YTQuantity.from_pint(p_quan)
 
     yield assert_array_equal, p_arr, yt_arr.to_pint()
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_array_equal, yt_arr, YTArray.from_pint(p_arr)
     yield assert_array_equal, yt_arr, yt_arr2
 
     yield assert_equal, p_quan.magnitude, yt_quan.to_pint().magnitude
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_equal, yt_quan, YTQuantity.from_pint(p_quan)
     yield assert_equal, yt_quan, yt_quan2
 

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -127,10 +127,20 @@
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):
             expr = expr.subs(symbols[i], symbols[0])
-
+    prefix = None
+    if isinstance(expr, Mul):
+        coeffs = expr.as_coeff_Mul()
+        if coeffs[0] == 1 or not isinstance(coeffs[0], Float):
+            pass
+        else:
+            expr = coeffs[1]
+            prefix = Float(coeffs[0], 2)
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
 
+    if prefix is not None:
+        latex_repr = latex(prefix, mul_symbol="times") + '\\ ' + latex_repr
+
     if latex_repr == '1':
         return ''
     else:

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -713,7 +713,7 @@
         >>> c = yt.YTArray.from_pint(b)
         """
         p_units = []
-        for base, exponent in arr.units.items():
+        for base, exponent in arr._units.items():
             bs = convert_pint_units(base)
             p_units.append("%s**(%s)" % (bs, Rational(exponent)))
         p_units = "*".join(p_units)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -324,6 +324,8 @@
             self.ds = data_dir_load(ds_fn)
 
     def __call__(self):
+        if AnswerTestingTest.result_storage is None:
+            return
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
@@ -860,6 +862,47 @@
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
 
+class AxialPixelizationTest(AnswerTestingTest):
+    # This test is typically used once per geometry or coordinates type.
+    # Feed it a dataset, and it checks that the results of basic pixelization
+    # don't change.
+    _type_name = "AxialPixelization"
+    _attrs = ('geometry',)
+    def __init__(self, ds_fn, decimals=None):
+        super(AxialPixelizationTest, self).__init__(ds_fn)
+        self.decimals = decimals
+        self.geometry = self.ds.coordinates.name
+
+    def run(self):
+        rv = {}
+        ds = self.ds
+        for i, axis in enumerate(ds.coordinates.axis_order):
+            (bounds, center, display_center) = \
+                    pw.get_window_parameters(axis, ds.domain_center, None, ds)
+            slc = ds.slice(axis, center[i])
+            xax = ds.coordinates.axis_name[ds.coordinates.x_axis[axis]]
+            yax = ds.coordinates.axis_name[ds.coordinates.y_axis[axis]]
+            pix_x = ds.coordinates.pixelize(axis, slc, xax, bounds, (512, 512))
+            pix_y = ds.coordinates.pixelize(axis, slc, yax, bounds, (512, 512))
+            # Wipe out all NaNs
+            pix_x[np.isnan(pix_x)] = 0.0
+            pix_y[np.isnan(pix_y)] = 0.0
+            rv['%s_x' % axis] = pix_x
+            rv['%s_y' % axis] = pix_y
+        return rv
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                                          err_msg="Number of outputs not equal.",
+                                          verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_almost_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose_units(new_result[k], old_result[k],
+                                      10**(-self.decimals))
+
+
 def requires_sim(sim_fn, sim_type, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -26,7 +26,7 @@
 import json
 import pprint
 
-from yt.config import ytcfg
+from yt.config import ytcfg, CURRENT_CONFIG_FILE
 ytcfg["yt","__command_line"] = "True"
 from yt.startup_tasks import parser, subparsers
 from yt.funcs import \
@@ -39,11 +39,13 @@
     enable_plugins
 from yt.extern.six import add_metaclass, string_types
 from yt.extern.six.moves import urllib, input
+from yt.extern.six.moves.urllib.parse import urlparse
 from yt.convenience import load
 from yt.visualization.plot_window import \
     SlicePlot, \
     ProjectionPlot
 from yt.utilities.metadata import get_metadata
+from yt.utilities.configure import set_config
 from yt.utilities.exceptions import \
     YTOutputNotIdentified, YTFieldNotParseable
 
@@ -117,16 +119,48 @@
         print("Changeset = %s" % vstring.strip().decode("utf-8"))
     print("---")
     return vstring
+    
 
+def _get_girder_client():
+    try:
+        import girder_client
+    except ImportError:
+        print("this command requires girder_client to be installed")
+        print("Please install them using your python package manager, e.g.:")
+        print("   pip install girder_client --user")
+        exit()
+
+    hub_url = urlparse(ytcfg.get("yt", "hub_url"))
+    gc = girder_client.GirderClient(apiUrl=hub_url.geturl())
+    gc.authenticate(apiKey=ytcfg.get("yt", "hub_api_key"))
+    return gc
+
+
+_subparsers = {None: subparsers}
+_subparsers_description = {
+    'config': 'Get and set configuration values for yt',
+    'hub': 'Interact with the yt Hub'
+}
 class YTCommandSubtype(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
         if cls.name is not None:
             names = ensure_list(cls.name)
+            if cls.subparser not in _subparsers:
+                try:
+                    description = _subparsers_description[cls.subparser]
+                except KeyError:
+                    description = cls.subparser
+                parent_parser = argparse.ArgumentParser(add_help=False)
+                p = subparsers.add_parser(cls.subparser, help=description,
+                                          description=description,
+                                          parents=[parent_parser])
+                _subparsers[cls.subparser] = p.add_subparsers(
+                    title=cls.subparser, dest=cls.subparser)
+            sp = _subparsers[cls.subparser]
             for name in names:
-                sc = subparsers.add_parser(name,
-                    description = cls.description,
-                    help = cls.description)
+                sc = sp.add_parser(name, description=cls.description, 
+                                   help=cls.description)
                 sc.set_defaults(func=cls.run)
                 for arg in cls.args:
                     _add_arg(sc, arg)
@@ -138,6 +172,7 @@
     description = ""
     aliases = ()
     ndatasets = 1
+    subparser = None
 
     @classmethod
     def run(cls, args):
@@ -557,25 +592,27 @@
 
 
 class YTHubRegisterCmd(YTCommand):
-    name = "hub_register"
+    subparser = "hub"
+    name = "register"
     description = \
         """
-        Register a user on the Hub: http://hub.yt-project.org/
+        Register a user on the yt Hub: http://hub.yt/
         """
     def __call__(self, args):
-        # We need these pieces of information:
-        #   1. Name
-        #   2. Email
-        #   3. Username
-        #   4. Password (and password2)
-        #   5. (optional) URL
-        #   6. "Secret" key to make it epsilon harder for spammers
-        if ytcfg.get("yt","hub_api_key") != "":
+        try:
+            import requests
+        except ImportError:
+            print("yt {} requires requests to be installed".format(self.name))
+            print("Please install them using your python package manager, e.g.:")
+            print("   pip install requests --user")
+            exit()
+        if ytcfg.get("yt", "hub_api_key") != "":
             print("You seem to already have an API key for the hub in")
-            print("~/.yt/config .  Delete this if you want to force a")
+            print("{} . Delete this if you want to force a".format(CURRENT_CONFIG_FILE))
             print("new user registration.")
+            exit()
         print("Awesome!  Let's start by registering a new user for you.")
-        print("Here's the URL, for reference: http://hub.yt-project.org/ ")
+        print("Here's the URL, for reference: http://hub.yt/ ")
         print()
         print("As always, bail out with Ctrl-C at any time.")
         print()
@@ -586,8 +623,11 @@
         print()
         print("To start out, what's your name?")
         print()
-        name = input("Name? ")
-        if len(name) == 0: sys.exit(1)
+        first_name = input("First Name? ")
+        if len(first_name) == 0: sys.exit(1)
+        print()
+        last_name = input("Last Name? ")
+        if len(last_name) == 0: sys.exit(1)
         print()
         print("And your email address?")
         print()
@@ -604,33 +644,32 @@
             print("Sorry, they didn't match!  Let's try again.")
             print()
         print()
-        print("Would you like a URL displayed for your user?")
-        print("Leave blank if no.")
-        print()
-        url = input("URL? ")
-        print()
         print("Okay, press enter to register.  You should receive a welcome")
         print("message at %s when this is complete." % email)
         print()
         input()
-        data = dict(name = name, email = email, username = username,
-                    password = password1, password2 = password2,
-                    url = url, zap = "rowsdower")
-        data = urllib.parse.urlencode(data)
-        hub_url = "https://hub.yt-project.org/create_user"
-        req = urllib.request.Request(hub_url, data)
-        try:
-            urllib.request.urlopen(req).read()
-        except urllib.error.HTTPError as exc:
-            if exc.code == 400:
-                print("Sorry, the Hub couldn't create your user.")
-                print("You can't register duplicate users, which is the most")
-                print("common cause of this error.  All values for username,")
-                print("name, and email must be unique in our system.")
-                sys.exit(1)
-        except urllib.URLError as exc:
-            print("Something has gone wrong.  Here's the error message.")
-            raise exc
+
+        data = dict(firstName=first_name, email=email, login=username,
+                    password=password1, lastName=last_name, admin=False)
+        hub_url = ytcfg.get("yt", "hub_url")
+        req = requests.post(hub_url + "/user", data=data)
+      
+        if req.ok:
+            headers = {'Girder-Token': req.json()['authToken']['token']}
+        else:
+            if req.status_code == 400:
+                print("Registration failed with 'Bad request':")
+                print(req.json()["message"])
+            exit(1)
+        print("User registration successful")
+        print("Obtaining API key...")
+        req = requests.post(hub_url + "/api_key", headers=headers,
+                            data={'name': 'ytcmd', 'active': True})
+        apiKey = req.json()["key"]
+
+        print("Storing API key in configuration file")
+        set_config("yt", "hub_api_key", apiKey)
+        
         print()
         print("SUCCESS!")
         print()
@@ -810,40 +849,60 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTHubStartNotebook(YTCommand):
+    args = (
+        dict(dest="folderId", default=ytcfg.get("yt", "hub_sandbox"),
+             nargs="?", 
+             help="(Optional) Hub folder to mount inside the Notebook"),
+    )
+    description = \
+        """
+        Start the Jupyter Notebook on the yt Hub.
+        """
+    subparser = "hub"
+    name = "start"
+    def __call__(self, args):
+        gc = _get_girder_client()
+
+        # TODO: should happen server-side
+        _id = gc._checkResourcePath(args.folderId)
+
+        resp = gc.post("/notebook/{}".format(_id))
+        try:
+            print("Launched! Please visit this URL:")
+            print("    https://tmpnb.hub.yt" + resp['url'])
+            print()
+        except (KeyError, TypeError):
+            print("Something went wrong. The yt Hub responded with : ")
+            print(resp)
+
 class YTNotebookUploadCmd(YTCommand):
     args = (dict(short="file", type=str),)
     description = \
         """
-        Upload an IPython notebook to hub.yt-project.org.
+        Upload an IPython Notebook to the yt Hub.
         """
 
     name = "upload_notebook"
     def __call__(self, args):
-        filename = args.file
-        if not os.path.isfile(filename):
-            raise IOError(filename)
-        if not filename.endswith(".ipynb"):
-            print("File must be an IPython notebook!")
-            return 1
-        import json
-        try:
-            t = json.loads(open(filename).read())['metadata']['name']
-        except (ValueError, KeyError):
-            print("File does not appear to be an IPython notebook.")
-        if len(t) == 0:
-            t = filename.strip(".ipynb")
-        from yt.utilities.minimal_representation import MinimalNotebook
-        mn = MinimalNotebook(filename, t)
-        rv = mn.upload()
+        gc = _get_girder_client()
+        username = gc.get("/user/me")["login"]
+        gc.upload(args.file, "/user/{}/Public".format(username))
+
+        _id = gc.resourceLookup(
+            "/user/{}/Public/{}".format(username, args.file))["_id"]
+        _fid = next(gc.listFile(_id))["_id"]
+        hub_url = urlparse(ytcfg.get("yt", "hub_url"))
         print("Upload successful!")
         print()
         print("To access your raw notebook go here:")
         print()
-        print("  %s" % (rv['url']))
+        print("  {}://{}/#item/{}".format(hub_url.scheme, hub_url.netloc, _id))
         print()
         print("To view your notebook go here:")
         print()
-        print("  %s" % (rv['url'].replace("/go/", "/nb/")))
+        print("  http://nbviewer.jupyter.org/urls/{}/file/{}/download".format(
+            hub_url.netloc + hub_url.path, _fid))
         print()
 
 class YTPlotCmd(YTCommand):
@@ -947,7 +1006,7 @@
             )
     description = \
         """
-        Run the IPython Notebook
+        Start the Jupyter Notebook locally. 
         """
     def __call__(self, args):
         kwargs = {}
@@ -1141,6 +1200,61 @@
             print()
             pprint.pprint(rv)
 
+
+class YTConfigGetCmd(YTCommand):
+    subparser = 'config'
+    name = 'get'
+    description = 'get a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to retrieve.'))
+    def __call__(self, args):
+        from yt.utilities.configure import get_config
+        print(get_config(args.section, args.option))
+
+
+class YTConfigSetCmd(YTCommand):
+    subparser = 'config'
+    name = 'set'
+    description = 'set a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to set.'),
+            dict(short='value', help='The value to set the option to.'))
+    def __call__(self, args):
+        from yt.utilities.configure import set_config
+        set_config(args.section, args.option, args.value)
+
+
+class YTConfigRemoveCmd(YTCommand):
+    subparser = 'config'
+    name = 'rm'
+    description = 'remove a config option'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to remove.'))
+    def __call__(self, args):
+        from yt.utilities.configure import rm_config
+        rm_config(args.section, args.option)
+
+
+class YTConfigListCmd(YTCommand):
+    subparser = 'config'
+    name = 'list'
+    description = 'show the config content'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import write_config
+        write_config(sys.stdout)
+
+
+class YTConfigMigrateCmd(YTCommand):
+    subparser = 'config'
+    name = 'migrate'
+    description = 'migrate old config file'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import migrate_config
+        migrate_config()
+
+
 class YTSearchCmd(YTCommand):
     args = (dict(short="-o", longname="--output",
                  action="store", type=str,

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/configure.py
--- /dev/null
+++ b/yt/utilities/configure.py
@@ -0,0 +1,92 @@
+# -*- coding: UTF-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import argparse
+from yt.config import CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE
+from yt.extern.six.moves import configparser
+
+CONFIG = configparser.SafeConfigParser()
+CONFIG.read([CURRENT_CONFIG_FILE])
+
+
+def get_config(section, option):
+    return CONFIG.get(section, option)
+
+
+def set_config(section, option, value):
+    if not CONFIG.has_section(section):
+        CONFIG.add_section(section)
+    CONFIG.set(section, option, value)
+    write_config()
+
+
+def write_config(fd=None):
+    if fd is None:
+        with open(CURRENT_CONFIG_FILE, 'w') as fd:
+            CONFIG.write(fd)
+    else:
+        CONFIG.write(fd)
+
+def migrate_config():
+    if not os.path.exists(_OLD_CONFIG_FILE):
+        print("Old config not found.")
+        sys.exit()
+    CONFIG.read(_OLD_CONFIG_FILE)
+    print("Writing a new config file to: {}".format(CURRENT_CONFIG_FILE))
+    write_config()
+    print("Backing up the old config file: {}.bak".format(_OLD_CONFIG_FILE))
+    os.rename(_OLD_CONFIG_FILE, _OLD_CONFIG_FILE + '.bak')
+
+
+def rm_config(section, option):
+    CONFIG.remove_option(section, option)
+    write_config()
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Get and set configuration values for yt')
+    subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
+
+    get_parser = subparsers.add_parser('get', help='get a config value')
+    set_parser = subparsers.add_parser('set', help='set a config value')
+    rm_parser = subparsers.add_parser('rm', help='remove a config option')
+    subparsers.add_parser('migrate', help='migrate old config file')
+    subparsers.add_parser('list', help='show all config values')
+
+    get_parser.add_argument(
+        'section', help='The section containing the option.')
+    get_parser.add_argument('option', help='The option to retrieve.')
+
+    set_parser.add_argument(
+        'section', help='The section containing the option.')
+    set_parser.add_argument('option', help='The option to set.')
+    set_parser.add_argument('value', help='The value to set the option to.')
+
+    rm_parser.add_argument(
+        'section', help='The section containing the option to remove.')
+    rm_parser.add_argument('option', help='The option to remove.')
+
+    args = parser.parse_args()
+
+    if args.cmd == 'get':
+        print(get_config(args.section, args.option))
+    elif args.cmd == 'set':
+        set_config(args.section, args.option, args.value)
+    elif args.cmd == 'list':
+        write_config(sys.stdout)
+    elif args.cmd == 'migrate':
+        migrate_config()
+    elif args.cmd == 'rm':
+        rm_config(args.section, args.option)
+
+if __name__ == '__main__':
+    main()  # pragma: no cover

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -33,7 +33,14 @@
 
     For an explanation of the various cosmological measures, see, for example 
     Hogg (1999, http://xxx.lanl.gov/abs/astro-ph/9905116).
-    
+
+    WARNING: Cosmological distance calculations return values that are either
+    in the comoving or proper frame, depending on the specific quantity.  For
+    simplicity, the proper and comoving frames are set equal to each other
+    within the cosmology calculator.  This means that for some distance value,
+    x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+    care to understand which reference frame is correct for the given calculation.
+
     Parameters
     ----------
     hubble_constant : float
@@ -58,7 +65,7 @@
     >>> from yt.utilities.cosmology import Cosmology
     >>> co = Cosmology()
     >>> print(co.hubble_time(0.0).in_units("Gyr"))
-    
+
     """
     def __init__(self, hubble_constant = 0.71,
                  omega_matter = 0.27,
@@ -66,9 +73,9 @@
                  omega_curvature = 0.0,
                  unit_registry = None,
                  unit_system = "cgs"):
-        self.omega_matter = omega_matter
-        self.omega_lambda = omega_lambda
-        self.omega_curvature = omega_curvature
+        self.omega_matter = float(omega_matter)
+        self.omega_lambda = float(omega_lambda)
+        self.omega_curvature = float(omega_curvature)
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -110,6 +110,13 @@
             # We are somewhere in the middle of the face
             temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
             temp_y = ((temp_x - vc.left_edge[i])*vc.idds[i])
+            # There are some really tough cases where we just within a couple
+            # least significant places of the edge, and this helps prevent
+            # killing the calculation through a segfault in those cases.
+            if -1 < temp_y < 0 and step[i] > 0:
+                temp_y = 0.0
+            elif vc.dims[i] - 1 < temp_y < vc.dims[i] and step[i] < 0:
+                temp_y = vc.dims[i] - 1
             cur_ind[i] =  <int> (floor(temp_y))
         if step[i] > 0:
             temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/utilities/tests/test_config.py
--- /dev/null
+++ b/yt/utilities/tests/test_config.py
@@ -0,0 +1,142 @@
+# -*- coding: UTF-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import contextlib
+import mock
+import os
+import sys
+import unittest
+import yt.utilities.command_line
+import yt.config
+from yt.config import \
+    CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE
+from yt.extern.six import StringIO
+from yt.extern.six.moves.configparser import NoOptionError, SafeConfigParser
+
+
+_DUMMY_CFG = ['[yt]', 'loglevel = 49']
+
+
+ at contextlib.contextmanager
+def captureOutput():
+    oldout, olderr = sys.stdout, sys.stderr
+    try:
+        out = [StringIO(), StringIO()]
+        sys.stdout, sys.stderr = out
+        yield out
+    finally:
+        sys.stdout, sys.stderr = oldout, olderr
+        out[0] = out[0].getvalue()
+        out[1] = out[1].getvalue()
+
+
+class SysExitException(Exception):
+    pass
+
+
+def setUpModule():
+    for cfgfile in (CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE):
+        if os.path.exists(cfgfile):
+            os.rename(cfgfile, cfgfile + '.bak_test')
+
+            if cfgfile == CURRENT_CONFIG_FILE:
+                yt.utilities.configure.CONFIG = SafeConfigParser()
+                if not yt.utilities.configure.CONFIG.has_section('yt'):
+                    yt.utilities.configure.CONFIG.add_section('yt')
+
+
+def tearDownModule():
+    for cfgfile in (CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE): 
+        if os.path.exists(cfgfile + '.bak_test'):
+            os.rename(cfgfile + '.bak_test', cfgfile)
+
+
+class TestYTConfig(unittest.TestCase):
+    def _runYTConfig(self, args):
+        args = ['yt', 'config'] + args
+        retcode = 0
+
+        with mock.patch.object(sys, 'argv', args),\
+                mock.patch('sys.exit', side_effect=SysExitException) as exit,\
+                captureOutput() as output:
+            try:
+                yt.utilities.command_line.run_main()
+            except SysExitException:
+                args = exit.mock_calls[0][1]
+                retcode = args[0] if len(args) else 0
+        return {
+            'rc': retcode,
+            'stdout': output[0],
+            'stderr': output[1]
+        }
+
+class TestYTConfigCommands(TestYTConfig):
+    def testConfigCommands(self):
+        self.assertFalse(os.path.exists(CURRENT_CONFIG_FILE))
+
+        info = self._runYTConfig(['--help'])
+        self.assertEqual(info['rc'], 0)
+        self.assertEqual(info['stderr'], '')
+        self.assertIn('Get and set configuration values for yt',
+                      info['stdout'])
+
+        info = self._runYTConfig(['list'])
+        self.assertEqual(info['rc'], 0)
+        self.assertIn('[yt]', info['stdout'])
+
+        info = self._runYTConfig(['set', 'yt', '__parallel', 'True'])
+        self.assertEqual(info['rc'], 0)
+
+        info = self._runYTConfig(['get', 'yt', '__parallel'])
+        self.assertEqual(info['rc'], 0)
+        self.assertEqual(info['stdout'].strip(), 'True')
+
+        info = self._runYTConfig(['rm', 'yt', '__parallel'])
+        self.assertEqual(info['rc'], 0)
+
+        with self.assertRaises(NoOptionError):
+            self._runYTConfig(['get', 'yt', 'foo'])
+    
+    def tearDown(self):
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+
+class TestYTConfigMigration(TestYTConfig):
+
+    def setUp(self):
+        if not os.path.exists(os.path.dirname(_OLD_CONFIG_FILE)):
+            os.makedirs(os.path.dirname(_OLD_CONFIG_FILE))
+
+        with open(_OLD_CONFIG_FILE, 'w') as fh:
+            for line in _DUMMY_CFG:
+                fh.write('{}\n'.format(line))
+        
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+
+    def tearDown(self):
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+        if os.path.exists(_OLD_CONFIG_FILE + '.bak'):
+            os.remove(_OLD_CONFIG_FILE + '.bak')
+
+    def testConfigMigration(self):
+        self.assertFalse(os.path.exists(CURRENT_CONFIG_FILE))
+        self.assertTrue(os.path.exists(_OLD_CONFIG_FILE))
+        
+        info = self._runYTConfig(['migrate'])
+        self.assertEqual(info['rc'], 0)
+
+        self.assertTrue(os.path.exists(CURRENT_CONFIG_FILE))
+        self.assertFalse(os.path.exists(_OLD_CONFIG_FILE))
+        self.assertTrue(os.path.exists(_OLD_CONFIG_FILE + '.bak'))
+
+        with open(CURRENT_CONFIG_FILE, 'r') as fh:
+            new_cfg = ''.join(fh.readlines())
+        self.assertEqual(new_cfg.strip().split('\n'), _DUMMY_CFG)

diff -r f928678119086ca03c39b77425c884614cd1aa15 -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -20,9 +20,30 @@
     mylog, \
     iterable, \
     get_brewer_cmap, \
-    matplotlib_style_context
+    matplotlib_style_context, \
+    get_interactivity
 import numpy as np
 
+backend_dict = {'GTK': ['backend_gtk', 'FigureCanvasGTK',
+                       'FigureManagerGTK'],
+               'GTKAgg': ['backend_gtkagg', 'FigureCanvasGTKAgg'],
+               'GTKCairo': ['backend_gtkcairo', 'FigureCanvasGTKCairo'],
+               'MacOSX': ['backend_macosx', 'FigureCanvasMac', 'FigureManagerMac'],
+               'Qt4Agg': ['backend_qt4agg', 'FigureCanvasQTAgg'],
+               'Qt5Agg': ['backend_qt5agg', 'FigureCanvasQTAgg'],
+               'TkAgg': ['backend_tkagg', 'FigureCanvasTkAgg'],
+               'WX': ['backend_wx', 'FigureCanvasWx'],
+               'WXAgg': ['backend_wxagg', 'FigureCanvasWxAgg'],
+               'GTK3Cairo': ['backend_gtk3cairo',
+                             'FigureCanvasGTK3Cairo',
+                             'FigureManagerGTK3Cairo'],
+               'GTK3Agg': ['backend_gtk3agg', 'FigureCanvasGTK3Agg',
+                           'FigureManagerGTK3Agg'],
+               'WebAgg': ['backend_webagg', 'FigureCanvasWebAgg'],
+               'nbAgg': ['backend_nbagg', 'FigureCanvasNbAgg',
+                         'FigureManagerNbAgg'],
+                'agg': ['backend_agg', 'FigureCanvasAgg']}
+
 
 class CallbackWrapper(object):
     def __init__(self, viewer, window_plot, frb, field, font_properties, 
@@ -50,14 +71,15 @@
         self.font_color = font_color
         self.field = field
 
+
 class PlotMPL(object):
-    """A base class for all yt plots made using matplotlib.
+    """A base class for all yt plots made using matplotlib, that is backend independent.
 
     """
+
     def __init__(self, fsize, axrect, figure, axes):
         """Initialize PlotMPL class"""
         import matplotlib.figure
-        from ._mpl_imports import FigureCanvasAgg
         self._plot_valid = True
         if figure is None:
             self.figure = matplotlib.figure.Figure(figsize=fsize, frameon=True)
@@ -70,11 +92,33 @@
             axes.cla()
             axes.set_position(axrect)
             self.axes = axes
-        self.canvas = FigureCanvasAgg(self.figure)
+        canvas_classes = self._set_canvas()
+        self.canvas = canvas_classes[0](self.figure)
+        if len(canvas_classes) > 1:
+            self.manager = canvas_classes[1](self.canvas, 1)
         for which in ['major', 'minor']:
             for axis in 'xy':
                 self.axes.tick_params(which=which, axis=axis, direction='in')
 
+    def _set_canvas(self):
+        self.interactivity = get_interactivity()
+        if self.interactivity:
+            backend = str(matplotlib.get_backend())
+        else:
+            backend = 'agg'
+
+        for key in backend_dict.keys():
+            if key == backend:
+                mod = __import__('matplotlib.backends', globals(), locals(),
+                                 [backend_dict[key][0]], 0)
+                submod = getattr(mod, backend_dict[key][0])
+                FigureCanvas = getattr(submod, backend_dict[key][1])
+                if len(backend_dict[key]) > 2:
+                    FigureManager = getattr(submod, backend_dict[key][2])
+                    return [FigureCanvas, FigureManager]
+                else:
+                    return [FigureCanvas]
+
     def save(self, name, mpl_kwargs=None, canvas=None):
         """Choose backend and save image to disk"""
         from ._mpl_imports import \
@@ -105,6 +149,12 @@
             canvas.print_figure(name, **mpl_kwargs)
         return name
 
+    def show(self):
+        try:
+            self.manager.show()
+        except AttributeError:
+            self.canvas.show()
+
     def _get_labels(self):
         ax = self.axes
         labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/7e9b4e90e05a/
Changeset:   7e9b4e90e05a
Branch:      yt
User:        brittonsmith
Date:        2016-08-19 11:12:03+00:00
Summary:     Refactoring to eliminate high_res_box_size_fraction since its use is redundant with max_box_fraction.
Affected #:  2 files

diff -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b -r 7e9b4e90e05a3f92056f8bff77c064308e80d54f yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -43,7 +43,7 @@
 
     def create_cosmology_splice(self, near_redshift, far_redshift,
                                 minimal=True, max_box_fraction=1.0,
-                                deltaz_min=0.0, high_res_box_size_fraction=1.0,
+                                deltaz_min=0.0,
                                 time_data=True, redshift_data=True):
         r"""Create list of datasets capable of spanning a redshift
         interval.
@@ -69,14 +69,12 @@
         max_box_fraction : float
             In terms of the size of the domain, the maximum length a light
             ray segment can be in order to span the redshift interval from
-            one dataset to another.
+            one dataset to another.  If using a zoom-in simulation, this
+            parameter can be set to the length of the high resolution
+            region so as to limit ray segments to that size.  If the
+            high resolution region is not cubical, the smallest side
+            should be used.
             Default: 1.0 (the size of the box)
-        high_res_box_size_fraction : float
-            The fraction of the total domain size that will be used to
-            calculate the redshift interval from one dataset
-            to another.  Use this when working with zoom-in simulations.
-            Must be <= 1.0.
-            Default:  1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
             in the returned
@@ -109,11 +107,6 @@
             mylog.error('Both time_data and redshift_data are False.')
             return
 
-        if high_res_box_size_fraction > 1.:
-            raise RuntimeError(
-                "high_res_box_size_fraction must be <= 1.")
-        self.high_res_box_size_fraction = high_res_box_size_fraction
-
         # Link datasets in list with pointers.
         # This is used for connecting datasets together.
         for i, output in enumerate(self.splice_outputs):
@@ -128,6 +121,7 @@
                 output['next'] = self.splice_outputs[i + 1]
 
         # Calculate maximum delta z for each data dump.
+        self.max_box_fraction = max_box_fraction
         self._calculate_deltaz_max()
 
         # Calculate minimum delta z for each data dump.
@@ -157,7 +151,7 @@
             self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
             cosmology_splice.append(self.splice_outputs[0])
             z = cosmology_splice[-1]["redshift"]
-            z_target = z - max_box_fraction * cosmology_splice[-1]["dz_max"]
+            z_target = z - cosmology_splice[-1]["dz_max"]
 
             # fill redshift space with datasets
             while ((z_target > near_redshift) and
@@ -185,7 +179,7 @@
 
                 cosmology_splice.append(current_slice)
                 z = current_slice["redshift"]
-                z_target = z - max_box_fraction * current_slice["dz_max"]
+                z_target = z - current_slice["dz_max"]
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
@@ -212,8 +206,8 @@
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
         
-        # change the 'next' and 'previous' pointers to point to the correct outputs for the created
-        # splice
+        # change the 'next' and 'previous' pointers to point to the correct outputs
+        # for the created splice
         for i, output in enumerate(cosmology_splice):
             if len(cosmology_splice) == 1:
                 output['previous'] = None
@@ -278,7 +272,7 @@
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
-                                              self.high_res_box_size_fraction)
+                                              self.max_box_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -297,11 +291,11 @@
         """
 
         target_distance = self.simulation.box_size * \
-          self.high_res_box_size_fraction
+          self.max_box_fraction
         for output in self.splice_outputs:
             output['dz_max'] = self._deltaz_forward(output['redshift'],
                                                     target_distance)
-            
+
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).

diff -r 4733bb8fed393da4d1ec8cf9b90ace5f19d5193b -r 7e9b4e90e05a3f92056f8bff77c064308e80d54f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -81,7 +81,11 @@
     max_box_fraction : optional, float
         In terms of the size of the domain, the maximum length a light
         ray segment can be in order to span the redshift interval from
-        one dataset to another.
+        one dataset to another.  If using a zoom-in simulation, this
+        parameter can be set to the length of the high resolution
+        region so as to limit ray segments to that size.  If the
+        high resolution region is not cubical, the smallest side
+        should be used.
         Default: 1.0 (the size of the box)
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
@@ -94,13 +98,6 @@
         Set to np.inf (infinity) to use a single trajectory for the
         entire ray.
         Default: 0.
-    high_res_box_size_fraction : optional, float
-        For use with zoom-in simulations, use to specify the size of the
-        high resolution region of the simulation in terms of the fraction
-        of the total domain size.  If set, the light ray solution will be
-        calculated such that rays only make use of the high resolution
-        region.
-        Default: 1.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -127,8 +124,7 @@
                  near_redshift=None, far_redshift=None,
                  use_minimum_datasets=True, max_box_fraction=1.0,
                  deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
-                 high_res_box_size_fraction=1.0, time_data=True, 
-                 redshift_data=True,
+                 time_data=True, redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
         if near_redshift is not None and far_redshift is not None and \
@@ -141,7 +137,6 @@
         self.use_minimum_datasets = use_minimum_datasets
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
-        self.high_res_box_size_fraction = high_res_box_size_fraction
         self.parameter_filename = parameter_filename
         if load_kwargs is None:
             self.load_kwargs = {}
@@ -186,7 +181,6 @@
                   self.near_redshift, self.far_redshift,
                   minimal=self.use_minimum_datasets,
                   max_box_fraction=max_box_fraction,
-                  high_res_box_size_fraction=self.high_res_box_size_fraction,
                   deltaz_min=self.deltaz_min,
                   time_data=time_data,
                   redshift_data=redshift_data)


https://bitbucket.org/yt_analysis/yt/commits/4a4e2527eb7e/
Changeset:   4a4e2527eb7e
Branch:      yt
User:        brittonsmith
Date:        2016-08-19 11:29:56+00:00
Summary:     Fixing some units errors.
Affected #:  1 file

diff -r 7e9b4e90e05a3f92056f8bff77c064308e80d54f -r 4a4e2527eb7e28a744e3ed9e5208d50d9788226a yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -250,6 +250,9 @@
                         box_fraction_used = 0.0
                     else:
                         ds = load(self.light_ray_solution[q]["filename"])
+                        ray_length = \
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
+                                  "unitary")
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
                           non_periodic_ray(ds, left_edge, right_edge,
@@ -853,13 +856,13 @@
                     np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
         i += 1
-        test_ray = ds.ray(start, end.d)
+        test_ray = ds.ray(start, end)
         if (end >= left_edge).all() and (end <= right_edge).all() and \
           (min_level is None or min_level <= 0 or
            (test_ray["grid_level"] >= min_level).all()):
             mylog.info("Found ray after %d attempts." % i)
             del test_ray
-            return start, end.d
+            return start, end
         del test_ray
         if i > max_iter:
             raise RuntimeError(


https://bitbucket.org/yt_analysis/yt/commits/28caefec8b07/
Changeset:   28caefec8b07
Branch:      yt
User:        brittonsmith
Date:        2016-08-19 12:53:27+00:00
Summary:     Adding two more tests.
Affected #:  1 file

diff -r 4a4e2527eb7e28a744e3ed9e5208d50d9788226a -r 28caefec8b0778bcbe4e66333cce588c6dc2f0d9 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -41,6 +41,48 @@
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
+ at requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nested():
+    """
+    This test generates a cosmological light ray confining the ray to a subvolume
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    left = np.ones(3) * 0.25
+    right = np.ones(3) * 0.75
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nonperiodic():
+    """
+    This test generates a cosmological light ray using non-periodic segments
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, periodic=False,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
 def test_light_ray_non_cosmo():


https://bitbucket.org/yt_analysis/yt/commits/9770523cd6df/
Changeset:   9770523cd6df
Branch:      yt
User:        brittonsmith
Date:        2016-08-19 13:52:47+00:00
Summary:     Updating docs.
Affected #:  1 file

diff -r 28caefec8b0778bcbe4e66333cce588c6dc2f0d9 -r 9770523cd6df033f2cade8fc023d000aabf4bcf6 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -51,7 +51,10 @@
 
 * ``max_box_fraction`` (*float*):  In terms of the size of the domain, the
   maximum length a light ray segment can be in order to span the redshift interval
-  from one dataset to another.  Default: 1.0 (the size of the box)
+  from one dataset to another.  If using a zoom-in simulation, this parameter can
+  be set to the length of the high resolution region so as to limit ray segments
+  to that size.  If the high resolution region is not cubical, the smallest side
+  should be used.  Default: 1.0 (the size of the box)
 
 * ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum
   length of a ray, in terms of the size of the domain, before the trajectory
@@ -59,12 +62,6 @@
   dataset.  Set to np.inf (infinity) to use a single trajectory for the
   entire ray.  Default: 0.0.
 
-* ``high_res_box_size_fraction`` (*float*): For use with zoom-in simulations,
-  use to specify the size of the high resolution region of the simulation in
-  terms of the fraction of the total domain size.  If set, the light ray
-  solution will be calculated such that rays only make use of the high
-  resolution region.  Default: 1.0.
-
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
 
@@ -182,9 +179,11 @@
 that you will want the ray segments to stay within the high resolution
 region.  To do this, you must first specify the size of the high
 resolution region when creating the `LightRay` using the
-``high_res_box_size_fraction`` keyword.  This will make sure that
-the calculation of the spacing of the segment datasets only takes into account
-the high resolution region and not the full box size.  Then, in the call to
+``max_box_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+account the high resolution region and not the full box size.  If your
+high resolution region is not a perfect cube, specify the smallest side.
+Then, in the call to
 :func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
 use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
 precise location of the high resolution region.


https://bitbucket.org/yt_analysis/yt/commits/ac09d74c204d/
Changeset:   ac09d74c204d
Branch:      yt
User:        brittonsmith
Date:        2016-08-23 16:48:07+00:00
Summary:     Forgot to import numpy.
Affected #:  1 file

diff -r 9770523cd6df033f2cade8fc023d000aabf4bcf6 -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -10,6 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.testing import \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay


https://bitbucket.org/yt_analysis/yt/commits/dbaff0bb2cd1/
Changeset:   dbaff0bb2cd1
Branch:      yt
User:        brittonsmith
Date:        2016-08-23 16:56:30+00:00
Summary:     Merging with tip.
Affected #:  34 files

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be 
+applied to the dataset, and as it moves to lower wavelength values, the 
+optical depth value decreases according to the defined power law.  The 
+normalization value is the column density of the linked field which results
+in an optical depth of 1 at the defined wavelength.  Below, we add the hydrogen 
+Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,11 +67,13 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_001:
+  local_absorption_spectrum_004:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
 
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -203,6 +203,13 @@
             input_ds = input_file
         field_data = input_ds.all_data()
 
+        # temperature field required to calculate voigt profile widths
+        if ('temperature' not in input_ds.derived_field_list) and \
+           (('gas', 'temperature') not in input_ds.derived_field_list):
+            raise RuntimeError(
+                "('gas', 'temperature') field required to be present in %s "
+                "for AbsorptionSpectrum to function." % input_file)
+
         self.tau_field = np.zeros(self.lambda_field.size)
         self.absorbers_list = []
 
@@ -210,6 +217,7 @@
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
 
+        mylog.info("Creating spectrum")
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
@@ -268,47 +276,96 @@
                 redshift_eff = ((1 + redshift) * \
                                 (1 + field_data['redshift_dopp'])) - 1.
 
+        if not use_peculiar_velocity:
+            redshift_eff = redshift
+
         return redshift, redshift_eff
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
-        Add continuum features to the spectrum.
+        Add continuum features to the spectrum.  Continua are recorded as
+        a name, associated field, wavelength, normalization value, and index.
+        Continua are applied at and below the denoted wavelength, where the
+        optical depth decreases as a power law of desired index.  For positive 
+        index values, this means optical depth is highest at the denoted 
+        wavelength, and it drops with shorter and shorter wavelengths.  
+        Consequently, transmitted flux undergoes a discontinuous cutoff at the 
+        denoted wavelength, and then slowly increases with decreasing wavelength 
+        according to the power law.
         """
         # Change the redshifts of continuum sources to account for the
         # redshift at which the observer sits
         redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                  use_peculiar_velocity, observing_redshift)
 
-        # Only add continuum features down to tau of 1.e-4.
-        min_tau = 1.e-3
+        # min_tau is the minimum optical depth value that warrants 
+        # accounting for an absorber.  for a single absorber, noticeable 
+        # continuum effects begin for tau = 1e-3 (leading to transmitted 
+        # flux of e^-tau ~ 0.999).  but we apply a cutoff to remove
+        # absorbers with insufficient column_density to contribute 
+        # significantly to a continuum (see below).  because lots of 
+        # low column density absorbers can add up to a significant
+        # continuum effect, we normalize min_tau by the n_absorbers.
+        n_absorbers = field_data['dl'].size
+        min_tau = 1.e-3/n_absorbers
 
         for continuum in self.continuum_list:
-            column_density = field_data[continuum['field_name']] * field_data['dl']
+
+            # Normalization is in cm**-2, so column density must be as well
+            column_density = (field_data[continuum['field_name']] * 
+                              field_data['dl']).in_units('cm**-2')
+            if (column_density == 0).all():
+                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
                 delta_lambda = continuum['wavelength'] * redshift_eff
             else:
                 delta_lambda = continuum['wavelength'] * redshift
+
+            # right index of continuum affected area is wavelength itself
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
+            right_index = np.digitize(this_wavelength, 
+                                      self.lambda_field).clip(0, self.n_lambda)
+            # left index of continuum affected area wavelength at which 
+            # optical depth reaches tau_min
             left_index = np.digitize((this_wavelength *
-                                     np.power((min_tau * continuum['normalization'] /
-                                               column_density), (1. / continuum['index']))),
-                                    self.lambda_field).clip(0, self.n_lambda)
+                              np.power((min_tau * continuum['normalization'] /
+                                        column_density),
+                                       (1. / continuum['index']))),
+                              self.lambda_field).clip(0, self.n_lambda)
 
+            # Only calculate the effects of continua where normalized 
+            # column_density is greater than min_tau
+            # because lower column will not have significant contribution
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
+            if valid_continuua.size == 0:
+                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
+                    continuum['label'])
+                continue
+
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
+
+            # Tau value is (wavelength / continuum_wavelength)**index / 
+            #              (column_dens / norm)
+            # i.e. a power law decreasing as wavelength decreases
+
+            # Step through the absorber list and add continuum tau for each to
+            # the total optical depth for all wavelengths
             for i, lixel in enumerate(valid_continuua):
-                line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
-                                     this_wavelength[lixel]), continuum['index']) * \
-                                     column_density[lixel] / continuum['normalization']
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                cont_tau = \
+                    np.power((self.lambda_field[left_index[lixel] :
+                                                right_index[lixel]] /
+                                   this_wavelength[lixel]), \
+                              continuum['index']) * \
+                    (column_density[lixel] / continuum['normalization'])
+                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau
                 pbar.update(i)
             pbar.finish()
 
@@ -333,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density == 0).all():
+                mylog.info("Not adding line %s: insufficient column density" % line['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
@@ -376,7 +436,10 @@
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            if use_peculiar_velocity:
+                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            else:
+                vlos = np.zeros(field_data['temperature'].size)
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
@@ -413,6 +476,12 @@
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
 
+                # if there is a ray element with temperature = 0 or column
+                # density = 0, skip it
+                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
+                    pbar.update(i)
+                    continue
+
                 # the virtual window into which the line is deposited initially
                 # spans a region of 2 coarse spectral bins
                 # (one on each side of the center_index) but the window

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -33,7 +33,8 @@
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-
+ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
+FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -145,6 +146,58 @@
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
+ at requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_novpec():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5', use_peculiar_velocity=False)
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=False)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_novpec".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(COSMO_PLUS_SINGLE)
 def test_equivalent_width_conserved():
     """
     This tests that the equivalent width of the optical depth is conserved 
@@ -360,3 +413,146 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+ at requires_file(ISO_GALAXY)
+ at requires_answer_testing()
+def test_absorption_spectrum_with_continuum():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset and adds Lyman alpha and Lyman continuum to it
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(ISO_GALAXY)
+    lr = LightRay(ds)
+
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'Ly C'
+    field = 'H_number_density'
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_continuum".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_with_continuum.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(FIRE)
+def test_absorption_spectrum_with_zero_field():
+    """
+    This test generates an absorption spectrum with a
+    particle dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(FIRE)
+    lr = LightRay(ds)
+
+    # Define species and associated parameters to add to continuum
+    # Parameters used for both adding the transition to the spectrum
+    # and for fitting
+    # Note that for single species that produce multiple lines
+    # (as in the OVI doublet), 'numLines' will be equal to the number
+    # of lines, and f,gamma, and wavelength will have multiple values.
+
+    HI_parameters = {
+        'name': 'HI',
+        'field': 'H_number_density',
+        'f': [.4164],
+        'Gamma': [6.265E8],
+        'wavelength': [1215.67],
+        'mass': 1.00794,
+        'numLines': 1,
+        'maxN': 1E22, 'minN': 1E11,
+        'maxb': 300, 'minb': 1,
+        'maxz': 6, 'minz': 0,
+        'init_b': 30,
+        'init_N': 1E14
+    }
+
+    species_dicts = {'HI': HI_parameters}
+
+
+    # Get all fields that need to be added to the light ray
+    fields = [('gas','temperature')]
+    for s, params in species_dicts.items():
+        fields.append(params['field'])
+
+    # With a single dataset, a start_position and
+    # end_position or trajectory must be given.
+    # Trajectory should be given as (r, theta, phi)
+    lr.make_light_ray(
+        start_position=ds.arr([0., 0., 0.], 'unitary'),
+        end_position=ds.arr([1., 1., 1.], 'unitary'),
+        solution_filename='test_lightraysolution.txt',
+        data_filename='test_lightray.h5',
+        fields=fields)
+    
+    # Create an AbsorptionSpectrum object extending from
+    # lambda = 900 to lambda = 1800, with 10000 pixels
+    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
+    
+    # Iterate over species
+    for s, params in species_dicts.items():
+        # Iterate over transitions for a single species
+        for i in range(params['numLines']):
+            # Add the lines to the spectrum
+            sp.add_line(
+                s, params['field'],
+                params['wavelength'][i], params['f'][i],
+                params['Gamma'][i], params['mass'],
+                label_threshold=1.e10)
+    
+    
+    # Make and save spectrum
+    wavelength, flux = sp.make_spectrum(
+        'test_lightray.h5',
+        output_file='test_spectrum.h5',
+        line_list_file='test_lines.txt',
+        use_peculiar_velocity=True)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,8 +21,6 @@
     load
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
-from yt.units.unit_object import \
-    Unit
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
@@ -459,7 +457,11 @@
 
         # Initialize data structures.
         self._data = {}
+        # temperature field is automatically added to fields
         if fields is None: fields = []
+        if (('gas', 'temperature') not in fields) and \
+           ('temperature' not in fields):
+           fields.append(('gas', 'temperature'))
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
@@ -665,19 +667,18 @@
               self.cosmology.t_from_z(ds["current_redshift"])
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
+
         # Only return LightRay elements with non-zero density
-        mask_field_units = ['K', 'cm**-3', 'g/cm**3']
-        mask_field_units = [Unit(u) for u in mask_field_units]
-        for f in data:
-            for u in mask_field_units:
-                if data[f].units.same_dimensions_as(u):
-                    mask = data[f] > 0
-                    if not np.any(mask):
-                        raise RuntimeError(
-                            "No zones along light ray with nonzero %s. "
-                            "Please modify your light ray trajectory." % (f,))
-                    for key in data.keys():
-                        data[key] = data[key][mask]
+        if 'temperature' in data: f = 'temperature'
+        if ('gas', 'temperature') in data: f = ('gas', 'temperature')
+        if 'temperature' in data or ('gas', 'temperature') in data:
+            mask = data[f] > 0
+            if not np.any(mask):
+                raise RuntimeError(
+                    "No zones along light ray with nonzero %s. "
+                    "Please modify your light ray trajectory." % (f,))
+            for key in data.keys():
+                data[key] = data[key][mask]
         save_as_dataset(ds, filename, data, field_types=field_types,
                         extra_attrs=extra_attrs)
 

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -888,10 +888,12 @@
         return new_unit
 
     def set_code_units(self):
-        self._set_code_unit_attributes()
         # here we override units, if overrides have been provided.
         self._override_code_units()
 
+        # set attributes like ds.length_unit
+        self._set_code_unit_attributes()
+
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -931,19 +933,22 @@
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return
-        mylog.warning("Overriding code units. This is an experimental and potentially "+
-                      "dangerous option that may yield inconsistent results, and must be used "+
-                      "very carefully, and only if you know what you want from it.")
+        mylog.warning(
+            "Overriding code units. This is an experimental and potentially "
+            "dangerous option that may yield inconsistent results, and must be "
+            "used very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
-                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
+                          ("velocity","cm/s"), ("magnetic","gauss"), 
+                          ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
                 if isinstance(val, YTQuantity):
                     val = (val.v, str(val.units))
                 elif not isinstance(val, tuple):
                     val = (val, cgs)
-                u = getattr(self, "%s_unit" % unit)
-                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
+                u = getattr(self, "%s_unit" % unit, None)
+                mylog.info("Overriding %s_unit: %g -> %g %s.",
+                           unit, u, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -25,7 +25,8 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.frontends.art.definitions import \
@@ -243,10 +244,10 @@
         mass = aM0 * 1.98892e33
 
         self.cosmological_simulation = True
-        self.mass_unit = self.quan(mass, "g*%s" % ng**3)
-        self.length_unit = self.quan(box_proper, "Mpc")
-        self.velocity_unit = self.quan(velocity, "cm/s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'mass_unit', self.quan(mass, "g*%s" % ng**3))
+        setdefaultattr(self, 'length_unit', self.quan(box_proper, "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.quan(velocity, "cm/s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     def _parse_parameter_file(self):
         """

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -30,7 +30,8 @@
     ARTIOFieldInfo
 
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.geometry_handler import \
     Index, \
     YTDataChunk
@@ -354,10 +355,13 @@
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(self.parameters["unit_m"], "g")
-        self.length_unit = self.quan(self.parameters["unit_l"], "cm")
-        self.time_unit = self.quan(self.parameters["unit_t"], "s")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(
+            self, 'mass_unit', self.quan(self.parameters["unit_m"], "g"))
+        setdefaultattr(
+            self, 'length_unit', self.quan(self.parameters["unit_l"], "cm"))
+        setdefaultattr(
+            self, 'time_unit', self.quan(self.parameters["unit_t"], "s"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
     def _parse_parameter_file(self):
         # hard-coded -- not provided by headers

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -471,12 +471,15 @@
 
     def _set_code_unit_attributes(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical _units based on the
+        parameter file
         """
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may be overridden later.
+            # We set these to cgs for now, but they may have been overriden
+            if getattr(self, unit+'_unit', None) is not None:
+                continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
         self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -34,6 +34,7 @@
         ("cell_centered_B_x", (b_units, [], None)),
         ("cell_centered_B_y", (b_units, [], None)),
         ("cell_centered_B_z", (b_units, [], None)),
+        ("gravitational_potential", ("code_velocity**2", ["gravitational_potential"], None)),
     )
 
 # In Athena, conservative or primitive variables may be written out.

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -49,6 +49,20 @@
         test_blast.__name__ = test.description
         yield test
 
+uo_blast = {
+    'length_unit': (1.0, 'pc'),
+    'mass_unit': (2.38858753789e-24, 'g/cm**3*pc**3'),
+    'time_unit': (1.0, 's*pc/km'),
+}
+
+ at requires_file(blast)
+def test_blast_override():
+    # verify that overriding units causes derived unit values to be updated.
+    # see issue #1259
+    ds = load(blast, units_override=uo_blast)
+    assert_equal(float(ds.magnetic_unit.in_units('gauss')),
+                 5.478674679698131e-07)
+
 uo_stripping = {"time_unit":3.086e14,
                 "length_unit":8.0236e22,
                 "mass_unit":9.999e-30*8.0236e22**3}

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -22,8 +22,9 @@
 import numpy as np
 
 from yt.funcs import \
+    ensure_tuple, \
     mylog, \
-    ensure_tuple
+    setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
@@ -608,10 +609,10 @@
             self._setup2d()
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
 
     def _setup1d(self):
         # self._index_class = BoxlibHierarchy1D
@@ -1016,10 +1017,11 @@
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(1.0, "Msun")
-        self.time_unit = self.quan(1.0 / 3.08568025e19, "s")
-        self.length_unit = self.quan(1.0 / (1 + self.current_redshift), "Mpc")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0 / 3.08568025e19, "s"))
+        setdefaultattr(self, 'length_unit',
+                       self.quan(1.0 / (1 + self.current_redshift), "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
 def _guess_pcast(vals):
     # Now we guess some things about the parameter and its type

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -23,7 +23,9 @@
 from stat import \
     ST_CTIME
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.extern import six
@@ -275,14 +277,19 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        mylog.warning("Setting code length to be 1.0 cm")
-        mylog.warning("Setting code mass to be 1.0 g")
-        mylog.warning("Setting code time to be 1.0 s")
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.magnetic_unit = self.quan(np.sqrt(4.*np.pi), "gauss")
-        self.velocity_unit = self.length_unit / self.time_unit
+        if not hasattr(self, 'length_unit'):
+            mylog.warning("Setting code length unit to be 1.0 cm")
+        if not hasattr(self, 'mass_unit'):
+            mylog.warning("Setting code mass unit to be 1.0 g")
+        if not hasattr(self, 'time_unit'):
+            mylog.warning("Setting code time unit to be 1.0 s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'magnetic_unit',
+                       self.quan(np.sqrt(4.*np.pi), "gauss"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.length_unit / self.time_unit)
 
     def _localize(self, f, default):
         if f is None:

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -28,7 +28,8 @@
 from yt.funcs import \
     ensure_list, \
     ensure_tuple, \
-    get_pbar
+    get_pbar, \
+    setdefaultattr
 from yt.config import ytcfg
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -917,11 +918,12 @@
             if box_size is None:
                 box_size = self.parameters["Physics"]["Cosmology"]\
                     ["CosmologyComovingBoxSize"]
-            self.length_unit = self.quan(box_size, "Mpccm/h")
-            self.mass_unit = \
-                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3
-            self.time_unit = self.quan(k['utim'], 's')
-            self.velocity_unit = self.quan(k['uvel'], 'cm/s')
+            setdefaultattr(self, 'length_unit', self.quan(box_size, "Mpccm/h"))
+            setdefaultattr(
+                self, 'mass_unit',
+                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3)
+            setdefaultattr(self, 'time_unit', self.quan(k['utim'], 's'))
+            setdefaultattr(self, 'velocity_unit', self.quan(k['uvel'], 'cm/s'))
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
@@ -937,15 +939,16 @@
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
                 length_unit = mass_unit = time_unit = 1.0
 
-            self.length_unit = self.quan(length_unit, "cm")
-            self.mass_unit = self.quan(mass_unit, "g")
-            self.time_unit = self.quan(time_unit, "s")
-            self.velocity_unit = self.length_unit / self.time_unit
+            setdefaultattr(self, 'length_unit', self.quan(length_unit, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+            setdefaultattr(
+                self, 'velocity_unit', self.length_unit / self.time_unit)
 
         magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                 (self.time_unit**2 * self.length_unit))
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
 
     def cosmology_get_units(self):
         """

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -14,6 +14,8 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.unstructured_mesh import \
@@ -163,9 +165,9 @@
         # should be set, along with examples of how to set them to standard
         # values.
         #
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
         #
         # These can also be set:
         # self.velocity_unit = self.quan(1.0, "cm/s")

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -25,8 +25,9 @@
 
 from yt.config import ytcfg
 from yt.funcs import \
+    ensure_list, \
     mylog, \
-    ensure_list
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -447,10 +448,10 @@
             mylog.warning("No length conversion provided. Assuming 1 = 1 cm.")
             length_factor = 1.0
             length_unit = "cm"
-        self.length_unit = self.quan(length_factor,length_unit)
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(length_factor,length_unit))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
         if "beam_size" in self.specified_parameters:
             beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -22,7 +22,9 @@
     AMRGridPatch
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.geometry.particle_geometry_handler import \
@@ -246,13 +248,14 @@
         else:
             length_factor = 1.0
             temperature_factor = 1.0
-        self.magnetic_unit = self.quan(b_factor, "gauss")
 
-        self.length_unit = self.quan(length_factor, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-        self.temperature_unit = self.quan(temperature_factor, "K")
+        setdefaultattr(self, 'magnetic_unit', self.quan(b_factor, "gauss"))
+        setdefaultattr(self, 'length_unit', self.quan(length_factor, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
+        setdefaultattr(
+            self, 'temperature_unit', self.quan(temperature_factor, "K"))
 
     def set_code_units(self):
         super(FLASHDataset, self).set_code_units()

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -33,7 +33,8 @@
     GadgetFOFFieldInfo, \
     GadgetFOFHaloFieldInfo
 from yt.funcs import \
-    only_on_root
+    only_on_root, \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.utilities.cosmology import \
@@ -232,7 +233,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
         
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -244,7 +246,8 @@
             else:
                 velocity_unit = (1e5, "cmcm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -259,7 +262,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -267,7 +270,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     def __repr__(self):
         return self.basename.split(".", 1)[0]

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -18,7 +18,9 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -205,7 +207,7 @@
                           "Use units_override to specify the units")
 
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            setattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
+            setdefaultattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
 
             if len(self.units_override) == 0:
                 mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -19,7 +19,9 @@
 import os
 from yt.extern.six import string_types
 from yt.funcs import \
-    just_one, ensure_tuple
+    ensure_tuple, \
+    just_one, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -223,17 +225,17 @@
                     un = unit_name[:-5]
                     un = un.replace('magnetic', 'magnetic_field', 1)
                     unit = self.unit_system[un]
-                    setattr(self, unit_name, self.quan(value, unit))
-                setattr(self, unit_name, self.quan(value, unit))
+                    setdefaultattr(self, unit_name, self.quan(value, unit))
+                setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:
                     if unit_name in self.field_units:
                         mylog.warning("'field_units' was overridden by 'dataset_units/%s'"
                                       % (unit_name))
                     self.field_units[unit_name] = str(unit)
         else:
-            self.length_unit = self.quan(1.0, "cm")
-            self.mass_unit = self.quan(1.0, "g")
-            self.time_unit = self.quan(1.0, "s")
+            setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
         h5f.close()
 

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -23,6 +23,8 @@
 from .fields import \
     HaloCatalogFieldInfo
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.data_objects.static_output import \
@@ -76,10 +78,10 @@
         self.parameters.update(hvals)
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm / s"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -19,6 +19,8 @@
 import weakref
 from yt.data_objects.unstructured_mesh import \
     SemiStructuredMesh
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.static_output import \
@@ -78,9 +80,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
@@ -161,9 +163,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         #  not sure if this import has side-effects so I'm not deleting it

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -24,7 +24,9 @@
 from .fields import \
     OWLSSubfindFieldInfo
 
-from yt.funcs import only_on_root
+from yt.funcs import \
+    only_on_root, \
+    setdefaultattr
 from yt.utilities.exceptions import \
     YTException
 from yt.utilities.logger import ytLogger as \
@@ -176,7 +178,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
 
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -185,7 +188,8 @@
         else:
             velocity_unit = (1e5, "cm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -200,7 +204,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -208,7 +212,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,8 @@
 
 from yt.extern.six import string_types
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_geometry_handler import \
     OctreeIndex
 from yt.geometry.geometry_handler import \
@@ -565,17 +566,21 @@
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
         mean_molecular_weight_factor = _X**-1
 
-        self.density_unit = self.quan(density_unit, 'g/cm**3')
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
-        self.time_unit = self.quan(time_unit, "s")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2*mp* 
-                                 mean_molecular_weight_factor/kb).in_units('K')
+        setdefaultattr(self, 'density_unit', self.quan(density_unit, 'g/cm**3'))
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
+        setdefaultattr(self, 'pressure_unit',
+                       self.quan(pressure_unit, 'dyne/cm**2'))
+        setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(length_unit, 'cm') / self.time_unit)
+        temperature_unit = (
+            self.velocity_unit**2*mp*mean_molecular_weight_factor/kb)
+        setdefaultattr(self, 'temperature_unit', temperature_unit.in_units('K'))
 
         # Only the length unit get scales by a factor of boxlen
-        self.length_unit = self.quan(length_unit * boxlen, "cm")
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit * boxlen, "cm"))
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -22,12 +22,14 @@
 from .fields import \
     RockstarFieldInfo
 
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
+from yt.funcs import \
+    setdefaultattr
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.cosmology import Cosmology
 import yt.utilities.fortran_utils as fpu
 
 from .definitions import \
@@ -92,10 +94,10 @@
 
     def _set_code_unit_attributes(self):
         z = self.current_redshift
-        self.length_unit = self.quan(1.0 / (1.0+z), "Mpc / h")
-        self.mass_unit = self.quan(1.0, "Msun / h")
-        self.velocity_unit = self.quan(1.0, "km / s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'length_unit', self.quan(1.0 / (1.0+z), "Mpc / h"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun / h"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "km / s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -28,7 +28,8 @@
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
 from yt.funcs import \
-    get_requests
+    get_requests, \
+    setdefaultattr
 from .fields import \
     SDFFieldInfo
 from yt.utilities.sdf import \
@@ -177,16 +178,22 @@
         return self._midx
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
-        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
-        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        setdefaultattr(
+            self, 'length_unit',
+            self.quan(1.0, self.parameters.get("length_unit", 'kpc')))
+        setdefaultattr(
+            self, 'velocity_unit',
+            self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr')))
+        setdefaultattr(
+            self, 'time_unit',
+            self.quan(1.0, self.parameters.get("time_unit", 'Gyr')))
         mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
         if ' ' in mass_unit:
             factor, unit = mass_unit.split(' ')
         else:
             factor = 1.0
             unit = mass_unit
-        self.mass_unit = self.quan(float(factor), unit)
+        setdefaultattr(self, 'mass_unit', self.quan(float(factor), unit))
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -1004,3 +1004,12 @@
 
 def get_interactivity():
     return interactivity
+
+def setdefaultattr(obj, name, value):
+    """Set attribute with *name* on *obj* with *value* if it doesn't exist yet
+
+    Analogous to dict.setdefault
+    """
+    if not hasattr(obj, name):
+        setattr(obj, name, value)
+    return getattr(obj, name)

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -819,6 +819,10 @@
             kwargs = self.kwargs
         return self.array_func(*args, **kwargs)
     def compare(self, new_result, old_result):
+        if not isinstance(new_result, dict):
+            new_result = {'answer': new_result}
+            old_result = {'answer': old_result}
+
         assert_equal(len(new_result), len(old_result),
                                           err_msg="Number of outputs not equal.",
                                           verbose=True)

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -243,16 +243,18 @@
     def frb():
         doc = "The frb property."
         def fget(self):
-            if self._frb is None:
+            if self._frb is None or self._data_valid is False:
                 self._recreate_frb()
             return self._frb
 
         def fset(self, value):
             self._frb = value
+            self._data_valid = True
 
         def fdel(self):
             del self._frb
             self._frb = None
+            self._data_valid = False
 
         return locals()
     frb = property(**frb())
@@ -261,8 +263,8 @@
         old_fields = None
         # If we are regenerating an frb, we want to know what fields we had before
         if self._frb is not None:
-            old_fields = list(self.frb.keys())
-            old_units = [str(self.frb[of].units) for of in old_fields]
+            old_fields = list(self._frb.keys())
+            old_units = [str(self._frb[of].units) for of in old_fields]
 
         # Set the bounds
         if hasattr(self,'zlim'):
@@ -273,12 +275,11 @@
             bounds = np.array([b.in_units('code_length') for b in bounds])
 
         # Generate the FRB
-        self._frb = self._frb_generator(self.data_source, bounds,
-                                        self.buff_size, self.antialias,
-                                        periodic=self._periodic)
+        self.frb = self._frb_generator(self.data_source, bounds,
+                                       self.buff_size, self.antialias,
+                                       periodic=self._periodic)
 
         # At this point the frb has the valid bounds, size, aliasing, etc.
-        self._data_valid = True
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:

diff -r ac09d74c204d6c9f4c2b2c47961757dcc860fd7c -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -444,3 +444,9 @@
             assert_raises(
                 YTInvalidFieldType, object, ds, normal, field_name_list)
 
+
+def test_frb_regen():
+    ds = fake_random_ds(32)
+    slc = SlicePlot(ds, 2, 'density')
+    slc.set_buff_size(1200)
+    assert_equal(slc.frb['density'].shape, (1200, 1200))


https://bitbucket.org/yt_analysis/yt/commits/2f1647ac9408/
Changeset:   2f1647ac9408
Branch:      yt
User:        brittonsmith
Date:        2016-08-24 07:19:36+00:00
Summary:     Pass correct ray length and be careful with YTQuantities.
Affected #:  1 file

diff -r dbaff0bb2cd1656312aefad3865e2b87c67189e6 -r 2f1647ac940850bcc7b929fe12d852102e838fa9 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -233,7 +233,7 @@
                 self.light_ray_solution[q]['traversal_box_fraction'] = \
                     self.cosmology.comoving_radial_distance(z_next, \
                         self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
-                        self.simulation.box_size  
+                        self.simulation.box_size
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
@@ -249,12 +249,11 @@
                     else:
                         ds = load(self.light_ray_solution[q]["filename"])
                         ray_length = \
-                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'].d,
                                   "unitary")
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
-                          non_periodic_ray(ds, left_edge, right_edge,
-                            self.light_ray_solution[q]['traversal_box_fraction'],
+                          non_periodic_ray(ds, left_edge, right_edge, ray_length,
                                            my_random=my_random, min_level=min_level)
                         del ds
                 else:


https://bitbucket.org/yt_analysis/yt/commits/333454532115/
Changeset:   333454532115
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 10:02:14+00:00
Summary:     Be more careful with assumed units.
Affected #:  1 file

diff -r 2f1647ac940850bcc7b929fe12d852102e838fa9 -r 333454532115561b090cd3ceefb2e63f0f6597ff yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -316,12 +316,14 @@
         left_edge : optional, iterable of floats or YTArray
             The left corner of the region in which rays are to be
             generated.  If None, the left edge will be that of the
-            domain.
+            domain.  If specified without units, it is assumed to
+            be in code units.
             Default: None.
         right_edge : optional, iterable of floats or YTArray
             The right corner of the region in which rays are to be
             generated.  If None, the right edge will be that of the
-            domain.
+            domain.  If specified without units, it is assumed to
+            be in code units.
             Default: None.
         min_level : optional, int
             The minimum refinement level of the spatial region in which
@@ -412,29 +414,32 @@
         else:
             domain = self.simulation
 
+        assumed_units = "code_length"
         if left_edge is None:
             left_edge = domain.domain_left_edge
         elif not hasattr(left_edge, 'units'):
-            left_edge = domain.arr(left_edge, 'code_length')
+            left_edge = domain.arr(left_edge, assumed_units)
         left_edge.convert_to_units('unitary')
 
         if right_edge is None:
             right_edge = domain.domain_right_edge
         elif not hasattr(right_edge, 'units'):
-            right_edge = domain.arr(right_edge, 'code_length')
+            right_edge = domain.arr(right_edge, assumed_units)
         right_edge.convert_to_units('unitary')
 
-        if start_position is not None and hasattr(start_position, 'units'):
-            start_position = start_position.to('unitary')
+        if start_position is not None and \
+          hasattr(start_position, 'units'):
+            start_position = start_position
         elif start_position is not None :
-            start_position = self.ds.arr(
-                start_position, 'code_length').to('unitary')
+            start_position = self.ds.arr(start_position, assumed_units)
+        start_position.convert_to_units('unitary')
 
-        if end_position is not None and hasattr(end_position, 'units'):
-            end_position = end_position.to('unitary')
+        if end_position is not None and \
+          hasattr(end_position, 'units'):
+            end_position = end_position
         elif end_position is not None :
-            end_position = self.ds.arr(
-                end_position, 'code_length').to('unitary')
+            end_position = self.ds.arr(end_position, assumed_units)
+        end_position.convert_to_units('unitary')
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity


https://bitbucket.org/yt_analysis/yt/commits/0f9102ed09cb/
Changeset:   0f9102ed09cb
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 10:12:38+00:00
Summary:     Don't let np.asarray strip units and make sure periodic_ray gets correct units.
Affected #:  1 file

diff -r 333454532115561b090cd3ceefb2e63f0f6597ff -r 0f9102ed09cbe789919bffa83e6028b4af1711b2 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -201,9 +201,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.asarray(start_position)
+            self.light_ray_solution[0]['start'] = start_position
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.asarray(end_position)
+                self.light_ray_solution[0]['end'] = end_position
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -525,9 +525,10 @@
                         my_segment['end']))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=left_edge,
-                                        right=right_edge)
+            sub_segments = periodic_ray(my_segment['start'].to("code_length"),
+                                        my_segment['end'].to("code_length"),
+                                        left=left_edge.to("code_length"),
+                                        right=right_edge.to("code_length"))
 
             # Prepare data structure for subsegment.
             sub_data = {}


https://bitbucket.org/yt_analysis/yt/commits/add189c39dcc/
Changeset:   add189c39dcc
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 10:20:58+00:00
Summary:     Remove last place where we force units on something and fix if tree.
Affected #:  1 file

diff -r 0f9102ed09cbe789919bffa83e6028b4af1711b2 -r add189c39dcc56910689696f987b5c3549c2b16f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -427,19 +427,19 @@
             right_edge = domain.arr(right_edge, assumed_units)
         right_edge.convert_to_units('unitary')
 
-        if start_position is not None and \
-          hasattr(start_position, 'units'):
-            start_position = start_position
-        elif start_position is not None :
-            start_position = self.ds.arr(start_position, assumed_units)
-        start_position.convert_to_units('unitary')
+        if start_position is not None:
+            if hasattr(start_position, 'units'):
+                start_position = start_position
+            else:
+                start_position = self.ds.arr(start_position, assumed_units)
+            start_position.convert_to_units('unitary')
 
-        if end_position is not None and \
-          hasattr(end_position, 'units'):
-            end_position = end_position
-        elif end_position is not None :
-            end_position = self.ds.arr(end_position, assumed_units)
-        end_position.convert_to_units('unitary')
+        if end_position is not None:
+            if hasattr(end_position, 'units'):
+                end_position = end_position
+            else:
+                end_position = self.ds.arr(end_position, assumed_units)
+            end_position.convert_to_units('unitary')
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
@@ -499,10 +499,6 @@
             if setup_function is not None:
                 setup_function(ds)
 
-            if start_position is not None:
-                my_segment["start"] = ds.arr(my_segment["start"], "unitary")
-                my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:


https://bitbucket.org/yt_analysis/yt/commits/f3630a595d0f/
Changeset:   f3630a595d0f
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 16:27:39+00:00
Summary:     Getting rid of operations that don't preserve units and allowing periodic_ray to return YTArrays.
Affected #:  1 file

diff -r add189c39dcc56910689696f987b5c3549c2b16f -r f3630a595d0f59d3c74fe3da3de53bc527159187 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -521,10 +521,10 @@
                         my_segment['end']))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'].to("code_length"),
-                                        my_segment['end'].to("code_length"),
-                                        left=left_edge.to("code_length"),
-                                        right=right_edge.to("code_length"))
+            sub_segments = periodic_ray(my_segment['start'],
+                                        my_segment['end'],
+                                        left=left_edge,
+                                        right=right_edge)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -577,7 +577,7 @@
                     # sight) and the velocity vectors: a dot b = ab cos(theta)
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
-                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
                     # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
@@ -791,7 +791,7 @@
     dim = right - left
 
     vector = end - start
-    wall = np.zeros(start.shape)
+    wall = np.zeros_like(start)
     close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
@@ -811,7 +811,6 @@
     this_end = end.copy()
     t = 0.0
     tolerance = 1e-6
-
     while t < 1.0 - tolerance:
         hit_left = (this_start <= left) & (vector < 0)
         if (hit_left).any():
@@ -829,7 +828,7 @@
         now = this_start + vector * dt
         close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([np.copy(this_start), np.copy(now)])
+        segments.append([this_start.copy(), now.copy()])
         this_start = now.copy()
         t += dt
 


https://bitbucket.org/yt_analysis/yt/commits/e5a12119efc0/
Changeset:   e5a12119efc0
Branch:      yt
User:        brittonsmith
Date:        2016-08-25 17:42:25+00:00
Summary:     Merging.
Affected #:  30 files

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -23,6 +23,7 @@
 yt/geometry/particle_smooth.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
+yt/utilities/lib/autogenerated_element_samplers.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
@@ -33,6 +34,7 @@
 yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
+yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+include yt/utilities/mesh_types.yaml
 exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1429,25 +1429,24 @@
         YT_DEPS+=('netcdf4')   
     fi
     
-    # Here is our dependency list for yt
-    log_cmd conda update --yes conda
+    log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
     done
 
     if [ $INST_PY3 -eq 1 ]
     then
         echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd pip install python-hglib
+    log_cmd ${DEST_DIR}/bin/pip install python-hglib
 
-    log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1474,17 +1473,17 @@
         ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
         log_cmd unzip ${DEST_DIR}/src/master.zip
         pushd ${DEST_DIR}/src/pyembree-master &> /dev/null
-        log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+        log_cmd ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
         popd &> /dev/null
     fi
 
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
-        ROCKSTAR_PACKAGE=$(conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
-        log_cmd conda build ${DEST_DIR}/src/yt_conda/rockstar
-        log_cmd conda install $ROCKSTAR_PACKAGE
+        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
+        log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
+        log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
         ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
     fi
 
@@ -1493,20 +1492,20 @@
     then
         if [ $INST_PY3 -eq 1 ]
         then
-            log_cmd pip install pyx
+            log_cmd ${DEST_DIR}/bin/pip install pyx
         else
-            log_cmd pip install pyx==0.12.1
+            log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1
         fi
     fi
 
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install -c conda-forge --yes yt
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
@@ -1517,7 +1516,7 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,6 +114,9 @@
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.autogenerated_element_samplers",
+              ["yt/utilities/lib/autogenerated_element_samplers.pyx"],
+              include_dirs=["yt/utilities/lib/"]),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs),
@@ -193,7 +196,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses",
+    "amr_kdtools", "lenses", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -11,7 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.field_data import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.particle_mesh_operations import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -28,7 +28,8 @@
 from yt.data_objects.data_containers import \
     YTSelectionContainer1D, \
     YTSelectionContainer2D, \
-    YTSelectionContainer3D, \
+    YTSelectionContainer3D
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.funcs import \
     ensure_list, \

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -65,6 +65,8 @@
 from yt.geometry.selection_routines import \
     compose_selector
 from yt.extern.six import add_metaclass, string_types
+from yt.data_objects.field_data import YTFieldData
+from yt.data_objects.profiles import create_profile
 
 data_object_registry = {}
 
@@ -91,12 +93,6 @@
         return tr
     return save_state
 
-class YTFieldData(dict):
-    """
-    A Container object for field data, instead of just having it be a dict.
-    """
-    pass
-
 class RegisteredDataContainer(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -813,8 +809,77 @@
         ex = self._compute_extrema(field)
         return ex[1] - ex[0]
 
-    def hist(self, field, weight = None, bins = None):
-        raise NotImplementedError
+    def profile(self, bin_fields, fields, n_bins=64,
+                extrema=None, logs=None, units=None,
+                weight_field="cell_mass",
+                accumulation=False, fractional=False,
+                deposition='ngp'):
+        r"""
+        Create a 1, 2, or 3D profile object from this data_source.
+
+        The dimensionality of the profile object is chosen by the number of
+        fields given in the bin_fields argument.  This simply calls
+        :func:`yt.data_objects.profiles.create_profile`.
+
+        Parameters
+        ----------
+        bin_fields : list of strings
+            List of the binning fields for profiling.
+        fields : list of strings
+            The fields to be profiled.
+        n_bins : int or list of ints
+            The number of bins in each dimension.  If None, 64 bins for
+            each bin are used for each bin field.
+            Default: 64.
+        extrema : dict of min, max tuples
+            Minimum and maximum values of the bin_fields for the profiles.
+            The keys correspond to the field names. Defaults to the extrema
+            of the bin_fields of the dataset. If a units dict is provided, extrema
+            are understood to be in the units specified in the dictionary.
+        logs : dict of boolean values
+            Whether or not to log the bin_fields for the profiles.
+            The keys correspond to the field names. Defaults to the take_log
+            attribute of the field.
+        units : dict of strings
+            The units of the fields in the profiles, including the bin_fields.
+        weight_field : str or tuple field identifier
+            The weight field for computing weighted average for the profile
+            values.  If None, the profile values are sums of the data in
+            each bin.
+        accumulation : bool or list of bools
+            If True, the profile values for a bin n are the cumulative sum of
+            all the values from bin 0 to n.  If -True, the sum is reversed so
+            that the value for bin n is the cumulative sum from bin N (total bins)
+            to n.  If the profile is 2D or 3D, a list of values can be given to
+            control the summation in each dimension independently.
+            Default: False.
+        fractional : If True the profile values are divided by the sum of all
+            the profile data such that the profile represents a probability
+            distribution function.
+        deposition : Controls the type of deposition used for ParticlePhasePlots.
+            Valid choices are 'ngp' and 'cic'. Default is 'ngp'. This parameter is
+            ignored the if the input fields are not of particle type.
+
+
+        Examples
+        --------
+
+        Create a 1d profile.  Access bin field from profile.x and field
+        data from profile[<field_name>].
+
+        >>> ds = load("DD0046/DD0046")
+        >>> ad = ds.all_data()
+        >>> profile = ad.profile(ad, [("gas", "density")],
+        ...                          [("gas", "temperature"),
+        ...                          ("gas", "velocity_x")])
+        >>> print (profile.x)
+        >>> print (profile["gas", "temperature"])
+        >>> plot = profile.plot()
+        """
+        p = create_profile(self, bin_fields, fields, n_bins,
+                   extrema, logs, units, weight_field, accumulation,
+                   fractional, deposition)
+        return p
 
     def mean(self, field, axis=None, weight='ones'):
         r"""Compute the mean of a field, optionally along an axis, with a

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/field_data.py
--- /dev/null
+++ b/yt/data_objects/field_data.py
@@ -0,0 +1,20 @@
+"""
+The YTFieldData object.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class YTFieldData(dict):
+    """
+    A Container object for field data, instead of just having it be a dict.
+    """
+    pass

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -17,8 +17,9 @@
 import numpy as np
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 from yt.geometry.selection_routines import convert_mask_to_indices
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.exceptions import \

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -17,8 +17,9 @@
 import numpy as np
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 import yt.geometry.particle_deposit as particle_deposit
 import yt.geometry.particle_smooth as particle_smooth
 

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -21,11 +21,12 @@
     ensure_list, \
     mylog
 from yt.extern.six import add_metaclass
+from yt.data_objects.field_data import \
+    YTFieldData
 
 particle_handler_registry = defaultdict()
 
 def particle_converter(func):
-    from .data_containers import YTFieldData
     def save_state(grid):
         old_params = grid.field_parameters
         old_keys = grid.field_data.keys()

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,9 @@
     array_like_field, \
     YTQuantity
 from yt.units.unit_object import Unit
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.field_data import YTFieldData
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
 from yt.utilities.lib.misc_utilities import \
     new_bin_profile1d, \
     new_bin_profile2d, \
@@ -453,6 +455,14 @@
     def bounds(self):
         return ((self.x_bins[0], self.x_bins[-1]),)
 
+    def plot(self):
+        r"""
+        This returns a :class:~yt.visualization.profile_plotter.ProfilePlot
+        with the fields that have been added to this object.
+        """
+        from yt.visualization.profile_plotter import ProfilePlot
+        return ProfilePlot.from_profiles(self)
+
 class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
     """
     A 1D profile object loaded from a ytdata dataset.
@@ -572,6 +582,14 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
+    def plot(self):
+        r"""
+        This returns a :class:~yt.visualization.profile_plotter.PhasePlot with
+        the fields that have been added to this object.
+        """
+        from yt.visualization.profile_plotter import PhasePlot
+        return PhasePlot.from_profile(self)
+
 class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
     """
     A 2D profile object loaded from a ytdata dataset.
@@ -928,7 +946,7 @@
     data from profile[<field_name>].
 
     >>> ds = load("DD0046/DD0046")
-    >>> ad = ds.h.all_data()
+    >>> ad = ds.all_data()
     >>> profile = create_profile(ad, [("gas", "density")],
     ...                              [("gas", "temperature"),
     ...                               ("gas", "velocity_x")])
@@ -940,10 +958,18 @@
     fields = ensure_list(fields)
     is_pfield = [data_source.ds._get_field_info(f).particle_type
                  for f in bin_fields + fields]
+    wf = None
+    if weight_field is not None:
+        wf = data_source.ds._get_field_info(weight_field)
+        is_pfield.append(wf.particle_type)
+        wf = wf.name
 
-    if len(bin_fields) == 1:
+    if any(is_pfield) and not all(is_pfield):
+        raise YTIllDefinedProfile(
+            bin_fields, data_source._determine_fields(fields), wf, is_pfield)
+    elif len(bin_fields) == 1:
         cls = Profile1D
-    elif len(bin_fields) == 2 and np.all(is_pfield):
+    elif len(bin_fields) == 2 and all(is_pfield):
         # log bin_fields set to False for Particle Profiles.
         # doesn't make much sense for CIC deposition.
         # accumulation and fractional set to False as well.

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -8,7 +8,13 @@
 from yt.testing import \
     fake_random_ds, \
     assert_equal, \
+    assert_raises, \
     assert_rel_equal
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
+from yt.visualization.profile_plotter import \
+    ProfilePlot, \
+    PhasePlot
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
@@ -158,3 +164,34 @@
                         weight_field = None)
         p3d.add_fields(["particle_ones"])
         yield assert_equal, p3d["particle_ones"].sum(), 32**3
+
+def test_mixed_particle_mesh_profiles():
+    ds = fake_random_ds(32, particles=10)
+    ad = ds.all_data()
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'particle_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'particle_ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'particle_mass',
+        'velocity_x')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'particle_ones')

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -22,8 +22,9 @@
     fill_fcoords, fill_fwidths
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 import yt.geometry.particle_deposit as particle_deposit
 
 class UnstructuredMesh(YTSelectionContainer):

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -40,7 +40,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
-from yt.data_objects.data_containers import \
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.utilities.exceptions import \
     YTParticleDepositionNotImplemented

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -26,7 +26,7 @@
     iterable, \
     ensure_list
 from yt.utilities.io_handler import io_registry
-from yt.data_objects.data_containers import \
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.data_objects.particle_unions import \
     ParticleUnion

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -23,15 +23,12 @@
 from yt.utilities.lib.fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport kernel_func, get_kernel_func, gind
+from yt.utilities.lib.distance_queue cimport NeighborList, Neighbor_compare, \
+    r2dist, DistanceQueue
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
 
-cdef struct NeighborList
-cdef struct NeighborList:
-    np.int64_t pn       # Particle number
-    np.float64_t r2     # radius**2
-
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef kernel_func sph_kernel
@@ -39,10 +36,8 @@
     cdef np.float64_t DW[3]
     cdef int nfields
     cdef int maxn
-    cdef int curn
     cdef bint periodicity[3]
     # Note that we are preallocating here, so this is *not* threadsafe.
-    cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t[:,:] ppos,
@@ -52,7 +47,7 @@
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds)
+                               np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
@@ -65,10 +60,7 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3])
-    cdef void neighbor_reset(self)
+                               int *nsize, DistanceQueue dq)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
@@ -78,7 +70,7 @@
                             np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
-                            np.float64_t[:,:] oct_dds)
+                            np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields)
+                      np.float64_t **index_fields, DistanceQueue dq)

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -24,44 +24,6 @@
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
 
-cdef int Neighbor_compare(void *on1, void *on2) nogil:
-    cdef NeighborList *n1
-    cdef NeighborList *n2
-    n1 = <NeighborList *> on1
-    n2 = <NeighborList *> on2
-    # Note that we set this up so that "greatest" evaluates to the *end* of the
-    # list, so we can do standard radius comparisons.
-    if n1.r2 < n2.r2:
-        return -1
-    elif n1.r2 == n2.r2:
-        return 0
-    else:
-        return 1
-
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.initializedcheck(False)
-cdef np.float64_t r2dist(np.float64_t ppos[3],
-                         np.float64_t cpos[3],
-                         np.float64_t DW[3],
-                         bint periodicity[3],
-                         np.float64_t max_dist2):
-    cdef int i
-    cdef np.float64_t r2, DR
-    r2 = 0.0
-    for i in range(3):
-        DR = (ppos[i] - cpos[i])
-        if not periodicity[i]:
-            pass
-        elif (DR > DW[i]/2.0):
-            DR -= DW[i]
-        elif (DR < -DW[i]/2.0):
-            DR += DW[i]
-        r2 += DR * DR
-        if max_dist2 >= 0.0 and r2 > max_dist2:
-            return -1.0
-    return r2
 
 cdef void spherical_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]):
     opos[0] = ipos[0] * sin(ipos[1]) * cos(ipos[2])
@@ -80,10 +42,6 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
-
-        self.neighbors = <NeighborList *> malloc(
-            sizeof(NeighborList) * self.maxn)
-        self.neighbor_reset()
         self.sph_kernel = get_kernel_func(kernel_name)
 
     def initialize(self, *args):
@@ -247,6 +205,9 @@
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(oct_positions.shape[0]):
             for j in range(3):
                 pos[j] = oct_positions[i, j]
@@ -260,7 +221,7 @@
                 dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
                 &nind, pind, pcount, offset, index_field_pointers,
                 particle_octree, domain_id, &nsize, oct_left_edges,
-                oct_dds)
+                oct_dds, dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -369,6 +330,9 @@
         # refers to that oct's particles.
         cdef int maxnei = 0
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -380,7 +344,8 @@
                     pos[k] = positions[pind0, k]
                 self.neighbor_process_particle(pos, cart_positions, field_pointers,
                             doff, &nind, pind, pcount, pind0,
-                            NULL, particle_octree, domain_id, &nsize)
+                            NULL, particle_octree, domain_id, &nsize,
+                            dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -463,55 +428,9 @@
 
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **ifields):
+                      np.float64_t **ifields, DistanceQueue dq):
         raise NotImplementedError
 
-    cdef void neighbor_reset(self):
-        self.curn = 0
-        for i in range(self.maxn):
-            self.neighbors[i].pn = -1
-            self.neighbors[i].r2 = 1e300
-
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3]):
-        # Here's a python+numpy simulator of this:
-        # http://paste.yt-project.org/show/5445/
-        cdef int i, di
-        cdef np.float64_t r2, r2_trunc
-        if self.curn == self.maxn:
-            # Truncate calculation if it's bigger than this in any dimension
-            r2_trunc = self.neighbors[self.curn - 1].r2
-        else:
-            # Don't truncate our calculation
-            r2_trunc = -1
-        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
-        if r2 == -1:
-            return
-        if self.curn == 0:
-            self.neighbors[0].r2 = r2
-            self.neighbors[0].pn = pn
-            self.curn += 1
-            return
-        # Now insert in a sorted way
-        di = -1
-        for i in range(self.curn - 1, -1, -1):
-            # We are checking if i is less than us, to see if we should insert
-            # to the right (i.e., i+1).
-            if self.neighbors[i].r2 < r2:
-                di = i
-                break
-        # The outermost one is already too small.
-        if di == self.maxn - 1:
-            return
-        if (self.maxn - (di + 2)) > 0:
-            memmove(<void *> (self.neighbors + di + 2),
-                    <void *> (self.neighbors + di + 1),
-                    sizeof(NeighborList) * (self.maxn - (di + 2)))
-        self.neighbors[di + 1].r2 = r2
-        self.neighbors[di + 1].pn = pn
-        if self.curn < self.maxn:
-            self.curn += 1
-
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -526,6 +445,7 @@
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
                             np.float64_t[:,:] oct_dds,
+                            DistanceQueue dq
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -535,13 +455,13 @@
         cdef np.float64_t ex[2] 
         cdef np.float64_t DR[2]
         cdef np.float64_t cp, r2_trunc, r2, dist
-        self.neighbor_reset()
+        dq.neighbor_reset()
         for ni in range(nneighbors):
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != None and self.curn == self.maxn:
-                r2_trunc = self.neighbors[self.curn - 1].r2
+            if oct_left_edges != None and dq.curn == dq.maxn:
+                r2_trunc = dq.neighbors[dq.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
                 # What this next bit does is figure out which component is the
@@ -577,7 +497,7 @@
                 pn = pinds[offset + i]
                 for j in range(3):
                     pos[j] = ppos[pn, j]
-                self.neighbor_eval(pn, pos, cpos)
+                dq.neighbor_eval(pn, pos, cpos)
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
@@ -592,7 +512,8 @@
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds):
+                               np.float64_t[:,:] oct_dds,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -610,17 +531,18 @@
                     nneighbors = self.neighbor_search(opos, octree,
                                     nind, nsize, nneighbors, domain_id, &oct, 0)
                     self.neighbor_find(nneighbors, nind[0], doffs, pcounts,
-                                       pinds, ppos, opos, oct_left_edges, oct_dds)
+                                       pinds, ppos, opos, oct_left_edges,
+                                       oct_dds, dq)
                     # Now we have all our neighbors in our neighbor list.
-                    if self.curn <-1*self.maxn:
+                    if dq.curn <-1*dq.maxn:
                         ntot = nntot = 0
                         for m in range(nneighbors):
                             if nind[0][m] < 0: continue
                             nntot += 1
                             ntot += pcounts[nind[0][m]]
-                        print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
+                        print "SOMETHING WRONG", dq.curn, nneighbors, ntot, nntot
                     self.process(offset, i, j, k, dim, opos, fields,
-                                 index_fields)
+                                 index_fields, dq)
                     cpos[2] += dds[2]
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
@@ -637,7 +559,8 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
-                               np.int64_t domain_id, int *nsize):
+                               np.int64_t domain_id, int *nsize,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -652,8 +575,8 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, None, None)
-        self.process(offset, i, j, k, dim, opos, fields, index_fields)
+                           opos, None, None, dq)
+        self.process(offset, i, j, k, dim, opos, fields, index_fields, dq)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
     # This smoothing function evaluates the field, *without* normalization, at
@@ -692,7 +615,7 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef int n, fi
@@ -702,13 +625,13 @@
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         max_hsml = index_fields[0][gind(i,j,k,dim) + offset]
-        for n in range(self.curn):
+        for n in range(dq.curn):
             # No normalization for the moment.
             # fields[0] is the smoothing length.
-            r2 = self.neighbors[n].r2
-            pn = self.neighbors[n].pn
+            r2 = dq.neighbors[n].r2
+            pn = dq.neighbors[n].pn
             # Smoothing kernel weight function
             mass = fields[0][pn]
             hsml = fields[1][pn]
@@ -751,15 +674,15 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        pn = self.neighbors[0].pn
+        pn = dq.neighbors[0].pn
         self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        #self.fp[gind(i,j,k,dim) + offset] = self.neighbors[0].r2
+        #self.fp[gind(i,j,k,dim) + offset] = dq.neighbors[0].r2
         return
 
 nearest_smooth = NearestNeighborSmooth
@@ -785,18 +708,18 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn, ni, di
         cdef np.float64_t total_weight = 0.0, total_value = 0.0, r2, val, w
         # We're going to do a very simple IDW average
-        if self.neighbors[0].r2 == 0.0:
-            pn = self.neighbors[0].pn
+        if dq.neighbors[0].r2 == 0.0:
+            pn = dq.neighbors[0].pn
             self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        for ni in range(self.curn):
-            r2 = self.neighbors[ni].r2
-            val = fields[0][self.neighbors[ni].pn]
+        for ni in range(dq.curn):
+            r2 = dq.neighbors[ni].r2
+            val = fields[0][dq.neighbors[ni].pn]
             w = r2
             for di in range(self.p2 - 1):
                 w *= r2
@@ -821,10 +744,10 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t max_r
         # We assume "offset" here is the particle index.
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         fields[0][offset] = max_r
 
 nth_neighbor_smooth = NthNeighborDistanceSmooth
@@ -842,16 +765,16 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t r2, hsml, dens, mass, weight, lw
         cdef int pn
         # We assume "offset" here is the particle index.
-        hsml = sqrt(self.neighbors[self.curn-1].r2)
+        hsml = sqrt(dq.neighbors[dq.curn-1].r2)
         dens = 0.0
         weight = 0.0
-        for pn in range(self.curn):
-            mass = fields[0][self.neighbors[pn].pn]
-            r2 = self.neighbors[pn].r2
+        for pn in range(dq.curn):
+            mass = fields[0][dq.neighbors[pn].pn]
+            r2 = dq.neighbors[pn].r2
             lw = self.sph_kernel(sqrt(r2) / hsml)
             dens += mass * lw
         weight = (4.0/3.0) * 3.1415926 * hsml**3

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -28,7 +28,8 @@
     assert_array_equal, \
     assert_equal, assert_raises, \
     assert_array_almost_equal_nulp, \
-    assert_array_almost_equal
+    assert_array_almost_equal, \
+    assert_almost_equal
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -1221,3 +1222,16 @@
     arr = [1, 2, 3]*km
     assert_equal(sum(arr), 6*km)
 
+def test_initialization_different_registries():
+    from yt.testing import fake_random_ds
+
+    ds1 = fake_random_ds(32, length_unit=1)
+    ds2 = fake_random_ds(32, length_unit=3)
+
+    l1 = ds1.quan(0.3, 'unitary')
+    l2 = ds2.quan(l1, 'unitary')
+
+    assert_almost_equal(float(l1.in_cgs()), 0.3)
+    assert_almost_equal(float(l2.in_cgs()), 0.9)
+    assert_almost_equal(float(ds1.quan(0.3, 'unitary').in_cgs()), 0.3)
+    assert_almost_equal(float(ds2.quan(0.3, 'unitary').in_cgs()), 0.9)

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -389,17 +389,18 @@
                     "ds.arr(%s, \"%s\")" % (input_array, input_units)
                     )
         if isinstance(input_array, YTArray):
+            ret = input_array.view(cls)
             if input_units is None:
                 if registry is None:
                     pass
                 else:
                     units = Unit(str(input_array.units), registry=registry)
-                    input_array.units = units
+                    ret.units = units
             elif isinstance(input_units, Unit):
-                input_array.units = input_units
+                ret.units = input_units
             else:
-                input_array.units = Unit(input_units, registry=registry)
-            return input_array.view(cls)
+                ret.units = Unit(input_units, registry=registry)
+            return ret
         elif isinstance(input_array, np.ndarray):
             pass
         elif iterable(input_array) and input_array:

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -593,3 +593,43 @@
         v += self.message
         v += " Specified bounds are %s" % self.bounds
         return v
+
+def screen_one_element_list(lis):
+    if len(lis) == 1:
+        return lis[0]
+    return lis
+
+class YTIllDefinedProfile(YTException):
+    def __init__(self, bin_fields, fields, weight_field, is_pfield):
+        nbin = len(bin_fields)
+        nfields = len(fields)
+        self.bin_fields = screen_one_element_list(bin_fields)
+        self.bin_fields_ptype = screen_one_element_list(is_pfield[:nbin])
+        self.fields = screen_one_element_list(fields)
+        self.fields_ptype = screen_one_element_list(is_pfield[nbin:nbin+nfields])
+        self.weight_field = weight_field
+        if self.weight_field is not None:
+            self.weight_field_ptype = is_pfield[-1]
+
+    def __str__(self):
+        msg = (
+            "\nCannot create a profile object that mixes particle and mesh "
+            "fields.\n\n"
+            "Received the following bin_fields:\n\n"
+            "   %s, particle_type = %s\n\n"
+            "Profile fields:\n\n"
+            "   %s, particle_type = %s\n"
+        )
+        msg = msg % (
+            self.bin_fields, self.bin_fields_ptype,
+            self.fields, self.fields_ptype
+        )
+
+        if self.weight_field is not None:
+            weight_msg = "\nAnd weight field:\n\n   %s, particle_type = %s\n"
+            weight_msg = weight_msg % (
+                self.weight_field, self.weight_field_ptype)
+        else:
+            weight_msg = ""
+
+        return msg + weight_msg

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/autogenerated_element_samplers.pxd
--- /dev/null
+++ b/yt/utilities/lib/autogenerated_element_samplers.pxd
@@ -0,0 +1,41 @@
+cdef void Q1Function3D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 
+cdef void Q1Jacobian3D(double* rcol,
+                       double* scol,
+                       double* tcol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 
+cdef void Q1Function2D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 
+cdef void Q1Jacobian2D(double* rcol,
+                       double* scol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 
+cdef void W1Function3D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 
+cdef void W1Jacobian3D(double* rcol,
+                       double* scol,
+                       double* tcol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil 
+
+ 

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/autogenerated_element_samplers.pyx
--- /dev/null
+++ b/yt/utilities/lib/autogenerated_element_samplers.pyx
@@ -0,0 +1,99 @@
+# This file contains auto-generated functions for sampling 
+# inside finite element solutions for various mesh types. 
+# To see how the code generation works in detail, see 
+# yt/utilities/mesh_code_generation.py. 
+
+ 
+cimport cython 
+ 
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void Q1Function3D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	fx[0] = 0.125*(1 - x[0])*(1 - x[1])*(1 - x[2])*vertices[0] + 0.125*(1 - x[0])*(1 - x[1])*(1 + x[2])*vertices[12] + 0.125*(1 - x[0])*(1 + x[1])*(1 - x[2])*vertices[9] + 0.125*(1 - x[0])*(1 + x[1])*(1 + x[2])*vertices[21] + 0.125*(1 + x[0])*(1 - x[1])*(1 - x[2])*vertices[3] + 0.125*(1 + x[0])*(1 - x[1])*(1 + x[2])*vertices[15] + 0.125*(1 + x[0])*(1 + x[1])*(1 - x[2])*vertices[6] + 0.125*(1 + x[0])*(1 + x[1])*(1 + x[2])*vertices[18] - phys_x[0];
+	fx[1] = 0.125*(1 - x[0])*(1 - x[1])*(1 - x[2])*vertices[1] + 0.125*(1 - x[0])*(1 - x[1])*(1 + x[2])*vertices[13] + 0.125*(1 - x[0])*(1 + x[1])*(1 - x[2])*vertices[10] + 0.125*(1 - x[0])*(1 + x[1])*(1 + x[2])*vertices[22] + 0.125*(1 + x[0])*(1 - x[1])*(1 - x[2])*vertices[4] + 0.125*(1 + x[0])*(1 - x[1])*(1 + x[2])*vertices[16] + 0.125*(1 + x[0])*(1 + x[1])*(1 - x[2])*vertices[7] + 0.125*(1 + x[0])*(1 + x[1])*(1 + x[2])*vertices[19] - phys_x[1];
+	fx[2] = 0.125*(1 - x[0])*(1 - x[1])*(1 - x[2])*vertices[2] + 0.125*(1 - x[0])*(1 - x[1])*(1 + x[2])*vertices[14] + 0.125*(1 - x[0])*(1 + x[1])*(1 - x[2])*vertices[11] + 0.125*(1 - x[0])*(1 + x[1])*(1 + x[2])*vertices[23] + 0.125*(1 + x[0])*(1 - x[1])*(1 - x[2])*vertices[5] + 0.125*(1 + x[0])*(1 - x[1])*(1 + x[2])*vertices[17] + 0.125*(1 + x[0])*(1 + x[1])*(1 - x[2])*vertices[8] + 0.125*(1 + x[0])*(1 + x[1])*(1 + x[2])*vertices[20] - phys_x[2];
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void Q1Jacobian3D(double* rcol,
+                       double* scol,
+                       double* tcol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	rcol[0] = -0.125*(1 - x[1])*(1 - x[2])*vertices[0] + 0.125*(1 - x[1])*(1 - x[2])*vertices[3] - 0.125*(1 - x[1])*(1 + x[2])*vertices[12] + 0.125*(1 - x[1])*(1 + x[2])*vertices[15] + 0.125*(1 + x[1])*(1 - x[2])*vertices[6] - 0.125*(1 + x[1])*(1 - x[2])*vertices[9] + 0.125*(1 + x[1])*(1 + x[2])*vertices[18] - 0.125*(1 + x[1])*(1 + x[2])*vertices[21];
+	scol[0] = -0.125*(1 - x[0])*(1 - x[2])*vertices[0] + 0.125*(1 - x[0])*(1 - x[2])*vertices[9] - 0.125*(1 - x[0])*(1 + x[2])*vertices[12] + 0.125*(1 - x[0])*(1 + x[2])*vertices[21] - 0.125*(1 + x[0])*(1 - x[2])*vertices[3] + 0.125*(1 + x[0])*(1 - x[2])*vertices[6] - 0.125*(1 + x[0])*(1 + x[2])*vertices[15] + 0.125*(1 + x[0])*(1 + x[2])*vertices[18];
+	tcol[0] = -0.125*(1 - x[0])*(1 - x[1])*vertices[0] + 0.125*(1 - x[0])*(1 - x[1])*vertices[12] - 0.125*(1 - x[0])*(1 + x[1])*vertices[9] + 0.125*(1 - x[0])*(1 + x[1])*vertices[21] - 0.125*(1 + x[0])*(1 - x[1])*vertices[3] + 0.125*(1 + x[0])*(1 - x[1])*vertices[15] - 0.125*(1 + x[0])*(1 + x[1])*vertices[6] + 0.125*(1 + x[0])*(1 + x[1])*vertices[18];
+	rcol[1] = -0.125*(1 - x[1])*(1 - x[2])*vertices[1] + 0.125*(1 - x[1])*(1 - x[2])*vertices[4] - 0.125*(1 - x[1])*(1 + x[2])*vertices[13] + 0.125*(1 - x[1])*(1 + x[2])*vertices[16] + 0.125*(1 + x[1])*(1 - x[2])*vertices[7] - 0.125*(1 + x[1])*(1 - x[2])*vertices[10] + 0.125*(1 + x[1])*(1 + x[2])*vertices[19] - 0.125*(1 + x[1])*(1 + x[2])*vertices[22];
+	scol[1] = -0.125*(1 - x[0])*(1 - x[2])*vertices[1] + 0.125*(1 - x[0])*(1 - x[2])*vertices[10] - 0.125*(1 - x[0])*(1 + x[2])*vertices[13] + 0.125*(1 - x[0])*(1 + x[2])*vertices[22] - 0.125*(1 + x[0])*(1 - x[2])*vertices[4] + 0.125*(1 + x[0])*(1 - x[2])*vertices[7] - 0.125*(1 + x[0])*(1 + x[2])*vertices[16] + 0.125*(1 + x[0])*(1 + x[2])*vertices[19];
+	tcol[1] = -0.125*(1 - x[0])*(1 - x[1])*vertices[1] + 0.125*(1 - x[0])*(1 - x[1])*vertices[13] - 0.125*(1 - x[0])*(1 + x[1])*vertices[10] + 0.125*(1 - x[0])*(1 + x[1])*vertices[22] - 0.125*(1 + x[0])*(1 - x[1])*vertices[4] + 0.125*(1 + x[0])*(1 - x[1])*vertices[16] - 0.125*(1 + x[0])*(1 + x[1])*vertices[7] + 0.125*(1 + x[0])*(1 + x[1])*vertices[19];
+	rcol[2] = -0.125*(1 - x[1])*(1 - x[2])*vertices[2] + 0.125*(1 - x[1])*(1 - x[2])*vertices[5] - 0.125*(1 - x[1])*(1 + x[2])*vertices[14] + 0.125*(1 - x[1])*(1 + x[2])*vertices[17] + 0.125*(1 + x[1])*(1 - x[2])*vertices[8] - 0.125*(1 + x[1])*(1 - x[2])*vertices[11] + 0.125*(1 + x[1])*(1 + x[2])*vertices[20] - 0.125*(1 + x[1])*(1 + x[2])*vertices[23];
+	scol[2] = -0.125*(1 - x[0])*(1 - x[2])*vertices[2] + 0.125*(1 - x[0])*(1 - x[2])*vertices[11] - 0.125*(1 - x[0])*(1 + x[2])*vertices[14] + 0.125*(1 - x[0])*(1 + x[2])*vertices[23] - 0.125*(1 + x[0])*(1 - x[2])*vertices[5] + 0.125*(1 + x[0])*(1 - x[2])*vertices[8] - 0.125*(1 + x[0])*(1 + x[2])*vertices[17] + 0.125*(1 + x[0])*(1 + x[2])*vertices[20];
+	tcol[2] = -0.125*(1 - x[0])*(1 - x[1])*vertices[2] + 0.125*(1 - x[0])*(1 - x[1])*vertices[14] - 0.125*(1 - x[0])*(1 + x[1])*vertices[11] + 0.125*(1 - x[0])*(1 + x[1])*vertices[23] - 0.125*(1 + x[0])*(1 - x[1])*vertices[5] + 0.125*(1 + x[0])*(1 - x[1])*vertices[17] - 0.125*(1 + x[0])*(1 + x[1])*vertices[8] + 0.125*(1 + x[0])*(1 + x[1])*vertices[20];
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void Q1Function2D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	fx[0] = 0.25*(1 - x[0])*(1 - x[1])*vertices[0] + 0.25*(1 - x[0])*(1 + x[1])*vertices[6] + 0.25*(1 + x[0])*(1 - x[1])*vertices[2] + 0.25*(1 + x[0])*(1 + x[1])*vertices[4] - phys_x[0];
+	fx[1] = 0.25*(1 - x[0])*(1 - x[1])*vertices[1] + 0.25*(1 - x[0])*(1 + x[1])*vertices[7] + 0.25*(1 + x[0])*(1 - x[1])*vertices[3] + 0.25*(1 + x[0])*(1 + x[1])*vertices[5] - phys_x[1];
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void Q1Jacobian2D(double* rcol,
+                       double* scol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	rcol[0] = -0.25*(1 - x[1])*vertices[0] + 0.25*(1 - x[1])*vertices[2] + 0.25*(1 + x[1])*vertices[4] - 0.25*(1 + x[1])*vertices[6];
+	scol[0] = -0.25*(1 - x[0])*vertices[0] + 0.25*(1 - x[0])*vertices[6] - 0.25*(1 + x[0])*vertices[2] + 0.25*(1 + x[0])*vertices[4];
+	rcol[1] = -0.25*(1 - x[1])*vertices[1] + 0.25*(1 - x[1])*vertices[3] + 0.25*(1 + x[1])*vertices[5] - 0.25*(1 + x[1])*vertices[7];
+	scol[1] = -0.25*(1 - x[0])*vertices[1] + 0.25*(1 - x[0])*vertices[7] - 0.25*(1 + x[0])*vertices[3] + 0.25*(1 + x[0])*vertices[5];
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void W1Function3D(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	fx[0] = 0.5*(1 - x[0] - x[1])*(1 - x[2])*vertices[0] + 0.5*(1 - x[0] - x[1])*(1 + x[2])*vertices[9] - phys_x[0] + 0.5*x[0]*(1 - x[2])*vertices[3] + 0.5*x[0]*(1 + x[2])*vertices[12] + 0.5*x[1]*(1 - x[2])*vertices[6] + 0.5*x[1]*(1 + x[2])*vertices[15];
+	fx[1] = 0.5*(1 - x[0] - x[1])*(1 - x[2])*vertices[1] + 0.5*(1 - x[0] - x[1])*(1 + x[2])*vertices[10] - phys_x[1] + 0.5*x[0]*(1 - x[2])*vertices[4] + 0.5*x[0]*(1 + x[2])*vertices[13] + 0.5*x[1]*(1 - x[2])*vertices[7] + 0.5*x[1]*(1 + x[2])*vertices[16];
+	fx[2] = 0.5*(1 - x[0] - x[1])*(1 - x[2])*vertices[2] + 0.5*(1 - x[0] - x[1])*(1 + x[2])*vertices[11] - phys_x[2] + 0.5*x[0]*(1 - x[2])*vertices[5] + 0.5*x[0]*(1 + x[2])*vertices[14] + 0.5*x[1]*(1 - x[2])*vertices[8] + 0.5*x[1]*(1 + x[2])*vertices[17];
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) 
+cdef void W1Jacobian3D(double* rcol,
+                       double* scol,
+                       double* tcol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil: 
+	rcol[0] = -0.5*(1 - x[2])*vertices[0] + 0.5*(1 - x[2])*vertices[3] - 0.5*(1 + x[2])*vertices[9] + 0.5*(1 + x[2])*vertices[12];
+	scol[0] = -0.5*(1 - x[2])*vertices[0] + 0.5*(1 - x[2])*vertices[6] - 0.5*(1 + x[2])*vertices[9] + 0.5*(1 + x[2])*vertices[15];
+	tcol[0] = -0.5*(1 - x[0] - x[1])*vertices[0] + 0.5*(1 - x[0] - x[1])*vertices[9] - 0.5*x[0]*vertices[3] + 0.5*x[0]*vertices[12] - 0.5*x[1]*vertices[6] + 0.5*x[1]*vertices[15];
+	rcol[1] = -0.5*(1 - x[2])*vertices[1] + 0.5*(1 - x[2])*vertices[4] - 0.5*(1 + x[2])*vertices[10] + 0.5*(1 + x[2])*vertices[13];
+	scol[1] = -0.5*(1 - x[2])*vertices[1] + 0.5*(1 - x[2])*vertices[7] - 0.5*(1 + x[2])*vertices[10] + 0.5*(1 + x[2])*vertices[16];
+	tcol[1] = -0.5*(1 - x[0] - x[1])*vertices[1] + 0.5*(1 - x[0] - x[1])*vertices[10] - 0.5*x[0]*vertices[4] + 0.5*x[0]*vertices[13] - 0.5*x[1]*vertices[7] + 0.5*x[1]*vertices[16];
+	rcol[2] = -0.5*(1 - x[2])*vertices[2] + 0.5*(1 - x[2])*vertices[5] - 0.5*(1 + x[2])*vertices[11] + 0.5*(1 + x[2])*vertices[14];
+	scol[2] = -0.5*(1 - x[2])*vertices[2] + 0.5*(1 - x[2])*vertices[8] - 0.5*(1 + x[2])*vertices[11] + 0.5*(1 + x[2])*vertices[17];
+	tcol[2] = -0.5*(1 - x[0] - x[1])*vertices[2] + 0.5*(1 - x[0] - x[1])*vertices[11] - 0.5*x[0]*vertices[5] + 0.5*x[0]*vertices[14] - 0.5*x[1]*vertices[8] + 0.5*x[1]*vertices[17];
+
+ 

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/distance_queue.pxd
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pxd
@@ -0,0 +1,43 @@
+"""
+A queue for evaluating distances to discrete points
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport cython
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+from libc.string cimport memmove
+
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2)
+
+cdef class DistanceQueue:
+    cdef int maxn
+    cdef int curn
+    cdef np.float64_t DW[3]
+    cdef bint periodicity[3]
+    cdef NeighborList* neighbors # flat array
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3])
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/distance_queue.pyx
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -0,0 +1,149 @@
+"""
+Distance queue implementation
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+cimport numpy as np
+import numpy as np
+cimport cython
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1
+    cdef NeighborList *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.initializedcheck(False)
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if max_dist2 >= 0.0 and r2 > max_dist2:
+            return -1.0
+    return r2
+
+cdef class DistanceQueue:
+    """This is a distance queue object, designed to incrementally evaluate N
+    positions against a reference point.  It is initialized with the maximum
+    number that are to be retained (i.e., maxn-nearest neighbors)."""
+    def __cinit__(self, int maxn):
+        cdef int i
+        self.maxn = maxn
+        self.curn = 0
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+        for i in range(3):
+            self.DW[i] = 0
+            self.periodicity[i] = False
+
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3]):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def setup(self, np.float64_t[:] DW, periodicity):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def __dealloc__(self):
+        free(self.neighbors)
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        # Here's a python+numpy simulator of this:
+        # http://paste.yt-project.org/show/5445/
+        cdef int i, di
+        cdef np.float64_t r2, r2_trunc
+        if self.curn == self.maxn:
+            # Truncate calculation if it's bigger than this in any dimension
+            r2_trunc = self.neighbors[self.curn - 1].r2
+        else:
+            # Don't truncate our calculation
+            r2_trunc = -1
+        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
+        if r2 == -1:
+            return
+        if self.curn == 0:
+            self.neighbors[0].r2 = r2
+            self.neighbors[0].pn = pn
+            self.curn += 1
+            return
+        # Now insert in a sorted way
+        di = -1
+        for i in range(self.curn - 1, -1, -1):
+            # We are checking if i is less than us, to see if we should insert
+            # to the right (i.e., i+1).
+            if self.neighbors[i].r2 < r2:
+                di = i
+                break
+        # The outermost one is already too small.
+        if di == self.maxn - 1:
+            return
+        if (self.maxn - (di + 2)) > 0:
+            memmove(<void *> (self.neighbors + di + 2),
+                    <void *> (self.neighbors + di + 1),
+                    sizeof(NeighborList) * (self.maxn - (di + 2)))
+        self.neighbors[di + 1].r2 = r2
+        self.neighbors[di + 1].pn = pn
+        if self.curn < self.maxn:
+            self.curn += 1
+
+    cdef void neighbor_reset(self):
+        for i in range(self.maxn):
+            self.neighbors[i].r2 = 1e300
+            self.neighbors[i].pn = -1
+        self.curn = 0
+
+    def find_nearest(self, np.float64_t[:] center, np.float64_t[:,:] points):
+        """This function accepts a center and a set of [N,3] points, and it
+        will return the indices into the points array of the maxn closest
+        neighbors."""
+        cdef int i, j
+        cdef np.float64_t ppos[3], cpos[3]
+        self.neighbor_reset()
+        for i in range(3):
+            cpos[i] = center[i]
+        for j in range(points.shape[0]):
+            for i in range(3):
+                ppos[i] = points[j,i]
+            self.neighbor_eval(j, ppos, cpos)
+        rv = np.empty(self.curn, dtype="int64")
+        for i in range(self.curn):
+            rv[i] = self.neighbors[i].pn
+        return rv

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -117,10 +117,11 @@
 #
 # outputs:
 #
-#     A        - A flattened array storing the Jacobian matrix
-#                The order of this array is [J11, J12, J21, J22]
+#     rcol     - the first column of the jacobian
+#     scol     - the second column of the jacobian
 #
-ctypedef void (*jac_type2D)(double* A,
+ctypedef void (*jac_type2D)(double* rcol,
+                            double* scol,
                             double* x,
                             double* vertices,
                             double* phys_x) nogil

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -19,6 +19,13 @@
 cimport cython
 import numpy as np
 from libc.math cimport fabs
+from yt.utilities.lib.autogenerated_element_samplers cimport \
+    Q1Function3D, \
+    Q1Jacobian3D, \
+    Q1Function2D, \
+    Q1Jacobian2D, \
+    W1Function3D, \
+    W1Jacobian3D
 
 cdef extern from "platform_dep.h":
     double fmax(double x, double y) nogil
@@ -392,70 +399,6 @@
             return -1
 
 
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void Q1Function3D(double* fx,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        fx[i] = vertices[0 + i]*rm*sm*tm \
-              + vertices[3 + i]*rp*sm*tm \
-              + vertices[6 + i]*rp*sp*tm \
-              + vertices[9 + i]*rm*sp*tm \
-              + vertices[12 + i]*rm*sm*tp \
-              + vertices[15 + i]*rp*sm*tp \
-              + vertices[18 + i]*rp*sp*tp \
-              + vertices[21 + i]*rm*sp*tp \
-              - 8.0*phys_x[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void Q1Jacobian3D(double* rcol,
-                              double* scol,
-                              double* tcol,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:    
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        rcol[i] = -sm*tm*vertices[0 + i]  + sm*tm*vertices[3 + i]  + \
-                   sp*tm*vertices[6 + i]  - sp*tm*vertices[9 + i]  - \
-                   sm*tp*vertices[12 + i] + sm*tp*vertices[15 + i] + \
-                   sp*tp*vertices[18 + i] - sp*tp*vertices[21 + i]
-        scol[i] = -rm*tm*vertices[0 + i]  - rp*tm*vertices[3 + i]  + \
-                   rp*tm*vertices[6 + i]  + rm*tm*vertices[9 + i]  - \
-                   rm*tp*vertices[12 + i] - rp*tp*vertices[15 + i] + \
-                   rp*tp*vertices[18 + i] + rm*tp*vertices[21 + i]
-        tcol[i] = -rm*sm*vertices[0 + i]  - rp*sm*vertices[3 + i]  - \
-                   rp*sp*vertices[6 + i]  - rm*sp*vertices[9 + i]  + \
-                   rm*sm*vertices[12 + i] + rp*sm*vertices[15 + i] + \
-                   rp*sp*vertices[18 + i] + rm*sp*vertices[21 + i]
-
-
 cdef class S2Sampler3D(NonlinearSolveSampler3D):
 
     ''' 
@@ -745,52 +688,6 @@
             return -1
 
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void W1Function3D(double* fx,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:
-    cdef int i
-    for i in range(3):
-        fx[i] = vertices[0 + i]*(1.0 - x[0] - x[1])*(1.0 - x[2]) \
-              + vertices[3 + i]*x[0]*(1.0 - x[2]) \
-              + vertices[6 + i]*x[1]*(1.0 - x[2]) \
-              + vertices[9 + i]*(1.0 - x[0] - x[1])*(1.0 + x[2]) \
-              + vertices[12 + i]*x[0]*(1.0 + x[2]) \
-              + vertices[15 + i]*x[1]*(1.0 + x[2]) \
-              - 2.0*phys_x[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void W1Jacobian3D(double* rcol,
-                              double* scol,
-                              double* tcol,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:    
-
-    cdef int i
-    for i in range(3):
-        rcol[i] = (x[2] - 1.0) * vertices[0 + i] \
-                - (x[2] - 1.0) * vertices[3 + i] \
-                - (x[2] + 1.0) * vertices[9 + i] \
-                + (x[2] + 1.0) * vertices[12 + i]
-        scol[i] = (x[2] - 1.0) * vertices[0 + i] \
-                - (x[2] - 1.0) * vertices[6 + i] \
-                - (x[2] + 1.0) * vertices[9 + i] \
-                + (x[2] + 1.0) * vertices[15 + i]
-        tcol[i] = (x[0] + x[1] - 1.0) * vertices[0 + i] \
-                - x[0] * vertices[3 + i] \
-                - x[1] * vertices[6 + i] \
-                - (x[0] + x[1] - 1.0) * vertices[9 + i] \
-                + x[0] * vertices[12 + i] \
-                + x[1] * vertices[15 + i]
-
-
 cdef class NonlinearSolveSampler2D(ElementSampler):
 
     '''
@@ -834,11 +731,11 @@
    
         # begin Newton iteration
         while (err > self.tolerance and iterations < self.max_iter):
-            self.jac(A, x, vertices, physical_x)
+            self.jac(&A[0], &A[2], x, vertices, physical_x)
             d = (A[0]*A[3] - A[1]*A[2])
             
-            x[0] -= ( A[3]*f[0] - A[1]*f[1]) / d
-            x[1] -= (-A[2]*f[0] + A[0]*f[1]) / d
+            x[0] -= ( A[3]*f[0] - A[2]*f[1]) / d
+            x[1] -= (-A[1]*f[0] + A[0]*f[1]) / d
 
             self.func(f, x, vertices, physical_x)        
             err = maxnorm(f, 2)
@@ -893,48 +790,6 @@
             return 0
         return 1
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void Q1Jacobian2D(double* A,
-                              double* x,
-                              double* vertices,
-                              double* phys_x) nogil:
-    cdef double rm, rp, sm, sp
-
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-
-    A[0] = -sm*vertices[0] + sm*vertices[2] + sp*vertices[4] - sp*vertices[6]
-    A[1] = -rm*vertices[0] - rp*vertices[2] + rp*vertices[4] + rm*vertices[6]
-    A[2] = -sm*vertices[1] + sm*vertices[3] + sp*vertices[5] - sp*vertices[7]
-    A[3] = -rm*vertices[1] - rp*vertices[3] + rp*vertices[5] + rm*vertices[7]
-    
-                
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void Q1Function2D(double* fx,
-                              double* x,
-                              double* vertices,
-                              double* phys_x) nogil:
-    cdef int i
-    cdef double rm, rp, sm, sp
-
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-
-    for i in range(2):
-        fx[i] = vertices[0 + i]*rm*sm \
-              + vertices[2 + i]*rp*sm \
-              + vertices[4 + i]*rp*sp \
-              + vertices[6 + i]*rm*sp \
-              - 4.0*phys_x[i]
-
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/mesh_code_generation.py
--- /dev/null
+++ b/yt/utilities/mesh_code_generation.py
@@ -0,0 +1,192 @@
+"""
+This file contains code for automatically generating the functions and jacobians
+used when sampling inside the supported finite element mesh types. The supported
+mesh types are defined in yt/utilities/mesh_types.yaml.
+
+Usage (from the yt/utilities directory):
+
+python mesh_code_generation.py 
+
+This will generate the necessary functions and write them to 
+yt/utilities/lib/autogenerated_element_samplers.pyx.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from sympy import \
+    symarray, \
+    diff, \
+    ccode, \
+    Matrix, \
+    MatrixSymbol
+import yaml
+
+
+# define some templates used below
+fun_signature = '''cdef void %s(double* fx,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil'''
+
+fun_dec_template = fun_signature + ' \n'
+fun_def_template = '''@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) \n''' + fun_signature + ': \n'
+
+jac_signature_3D = '''cdef void %s(double* rcol,
+                       double* scol,
+                       double* tcol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil'''
+
+jac_dec_template_3D = jac_signature_3D + ' \n'
+jac_def_template_3D = '''@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) \n''' + jac_signature_3D + ': \n'
+
+jac_signature_2D = '''cdef void %s(double* rcol,
+                       double* scol,
+                       double* x,
+                       double* vertices,
+                       double* phys_x) nogil'''
+jac_dec_template_2D = jac_signature_2D + ' \n'
+jac_def_template_2D = '''@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True) \n''' + jac_signature_2D + ': \n'
+
+file_header = "# This file contains auto-generated functions for sampling \n" + \
+              "# inside finite element solutions for various mesh types. \n" + \
+              "# To see how the code generation works in detail, see \n" + \
+              "# yt/utilities/mesh_code_generation.py. \n"
+
+
+class MeshCodeGenerator:
+    '''
+
+    A class for automatically generating the functions and jacobians used for
+    sampling inside finite element calculations.
+
+    '''
+    def __init__(self, mesh_data):
+        '''
+
+        Mesh data should be a dictionary containing information about the type
+        of elements used. See yt/utilities/mesh_types.yaml for more information.
+
+        '''
+        self.mesh_type = mesh_data['mesh_type']
+        self.num_dim = mesh_data['num_dim']
+        self.num_vertices = mesh_data['num_vertices']
+        self.num_mapped_coords = mesh_data['num_mapped_coords']
+
+        x = MatrixSymbol('x', self.num_mapped_coords, 1)
+        self.x = x
+        self.N = Matrix(eval(mesh_data['shape_functions']))
+        self._compute_jacobian()
+
+    def _compute_jacobian(self):
+
+        assert(self.num_vertices == len(self.N))
+        assert(self.num_dim == self.num_mapped_coords)
+
+        X = MatrixSymbol("vertices", self.num_vertices, self.num_dim)
+        self.fx = MatrixSymbol("fx", self.num_dim, 1)
+        physical_position = MatrixSymbol('phys_x', self.num_dim, 1)
+
+        self.f = (self.N.T * Matrix(X)).T - physical_position
+
+        self.J = symarray('J', (self.num_dim, self.num_dim))
+        for i in range(self.num_dim):
+            for j, var in enumerate(self.x):
+                self.J[i][j] = diff(self.f[i, 0], var)
+
+        self.rcol = MatrixSymbol("rcol", self.num_dim, 1)
+        self.scol = MatrixSymbol("scol", self.num_dim, 1)
+        self.tcol = MatrixSymbol("tcol", self.num_dim, 1)
+
+        self.function_name = '%sFunction%dD' % (self.mesh_type, self.num_dim)
+        self.function_header = fun_def_template % self.function_name
+        self.function_declaration = fun_dec_template % self.function_name
+
+        self.jacobian_name = '%sJacobian%dD' % (self.mesh_type, self.num_dim)
+
+        if (self.num_dim == 3):
+            self.jacobian_header = jac_def_template_3D % self.jacobian_name 
+            self.jacobian_declaration = jac_dec_template_3D % self.jacobian_name
+
+        elif (self.num_dim == 2):
+            self.jacobian_header = jac_def_template_2D % self.jacobian_name
+            self.jacobian_declaration = jac_dec_template_2D % self.jacobian_name
+
+    def get_interpolator_definition(self):
+        '''
+
+        This returns the function definitions for the given mesh type.
+
+        '''
+
+        function_code = self.function_header
+        for i in range(self.num_dim):
+            function_code += '\t' + ccode(self.f[i, 0], self.fx[i, 0]) + '\n'
+    
+        jacobian_code = self.jacobian_header
+        for i in range(self.num_dim):
+            jacobian_code += '\t' + ccode(self.J[i,0], self.rcol[i, 0]) + '\n'
+            jacobian_code += '\t' + ccode(self.J[i,1], self.scol[i, 0]) + '\n'
+            if self.num_dim == 2: 
+                continue
+            jacobian_code += '\t' + ccode(self.J[i,2], self.tcol[i, 0]) + '\n'
+            
+        return function_code, jacobian_code
+
+    def get_interpolator_declaration(self):
+        '''
+
+        This returns the function declarations for the given mesh type.
+
+        '''
+        return self.function_declaration, self.jacobian_declaration
+
+
+if __name__ == "__main__":
+
+    with open('mesh_types.yaml', 'r') as f:
+        lines = f.read()
+
+    mesh_types = yaml.load(lines)
+
+    pxd_file = open("lib/autogenerated_element_samplers.pxd", "w")
+    pyx_file = open("lib/autogenerated_element_samplers.pyx", "w")
+
+    pyx_file.write(file_header)
+    pyx_file.write("\n \n")
+    pyx_file.write("cimport cython \n \n")
+    pyx_file.write("\n \n")
+    
+    for _, mesh_data in sorted(mesh_types.items()):
+        codegen = MeshCodeGenerator(mesh_data)
+
+        function_code, jacobian_code = codegen.get_interpolator_definition()
+        function_decl, jacobian_decl = codegen.get_interpolator_declaration()
+
+        pxd_file.write(function_decl)
+        pxd_file.write("\n \n")
+        pxd_file.write(jacobian_decl)
+        pxd_file.write("\n \n")
+
+        pyx_file.write(function_code)
+        pyx_file.write("\n \n")
+        pyx_file.write(jacobian_code)
+        pyx_file.write("\n \n")
+
+    pxd_file.close()
+    pyx_file.close()

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/utilities/mesh_types.yaml
--- /dev/null
+++ b/yt/utilities/mesh_types.yaml
@@ -0,0 +1,38 @@
+Hex8:
+  mesh_type: Q1
+  num_dim: 3
+  num_vertices: 8
+  num_mapped_coords: 3
+  shape_functions: |
+    [(1 - x[0])*(1 - x[1])*(1 - x[2])/8.,
+     (1 + x[0])*(1 - x[1])*(1 - x[2])/8.,
+     (1 + x[0])*(1 + x[1])*(1 - x[2])/8.,
+     (1 - x[0])*(1 + x[1])*(1 - x[2])/8.,
+     (1 - x[0])*(1 - x[1])*(1 + x[2])/8.,
+     (1 + x[0])*(1 - x[1])*(1 + x[2])/8.,
+     (1 + x[0])*(1 + x[1])*(1 + x[2])/8.,
+     (1 - x[0])*(1 + x[1])*(1 + x[2])/8.]
+    
+Quad4:
+  mesh_type: Q1
+  num_dim: 2
+  num_vertices: 4
+  num_mapped_coords: 2
+  shape_functions: |
+    [(1 - x[0])*(1 - x[1])/4.,
+     (1 + x[0])*(1 - x[1])/4.,
+     (1 + x[0])*(1 + x[1])/4.,
+     (1 - x[0])*(1 + x[1])/4.]
+
+Wedge6:
+  mesh_type: W1
+  num_dim: 3
+  num_vertices: 6
+  num_mapped_coords: 3
+  shape_functions: |
+    [(1 - x[0] - x[1]) * (1 - x[2]) / 2.,
+     x[0] * (1 - x[2]) / 2.,
+     x[1] * (1 - x[2]) / 2.,
+     (1 - x[0] - x[1]) * (1 + x[2]) / 2.,
+     x[0] * (1 + x[2]) / 2.,
+     x[1] * (1 + x[2]) / 2.]
\ No newline at end of file

diff -r f3630a595d0f59d3c74fe3da3de53bc527159187 -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -156,21 +156,26 @@
     camera.resolution = resolution
     if not iterable(width):
         width = data_source.ds.arr([width]*3)
-    camera.position = center - width[2]*normal_vector
+    normal = np.array(normal_vector)
+    normal = normal / np.linalg.norm(normal)
+
+    camera.position = center - width[2]*normal
     camera.focus = center
-    
+
     # If north_vector is None, we set the default here.
-    # This is chosen so that if normal_vector is one of the 
+    # This is chosen so that if normal_vector is one of the
     # cartesian coordinate axes, the projection will match
     # the corresponding on-axis projection.
     if north_vector is None:
         vecs = np.identity(3)
-        t = np.cross(vecs, normal_vector).sum(axis=1)
+        t = np.cross(vecs, normal).sum(axis=1)
         ax = t.argmax()
-        east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
-        north_vector = np.cross(normal_vector, east_vector).ravel()
-    camera.switch_orientation(normal_vector,
-                              north_vector)
+        east_vector = np.cross(vecs[ax, :], normal).ravel()
+        north = np.cross(normal, east_vector).ravel()
+    else:
+        north = np.array(north_vector)
+        north = north / np.linalg.norm(north)
+    camera.switch_orientation(normal, north)
 
     sc.add_source(vol)
 


https://bitbucket.org/yt_analysis/yt/commits/51652f4ac234/
Changeset:   51652f4ac234
Branch:      yt
User:        brittonsmith
Date:        2016-08-26 11:19:11+00:00
Summary:     Removing unit workaround and checking for a dimensionless YTArray.
Affected #:  1 file

diff -r e5a12119efc0a96d12632552d2e6c99f0ffe9ac6 -r 51652f4ac23479e08ed4926d4fba7fdd56c80cec yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -249,7 +249,7 @@
                     else:
                         ds = load(self.light_ray_solution[q]["filename"])
                         ray_length = \
-                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'].d,
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
                                   "unitary")
                         self.light_ray_solution[q]['start'], \
                           self.light_ray_solution[q]['end'] = \
@@ -502,7 +502,8 @@
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
-                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
+                  not my_segment["traversal_box_fraction"].units.is_dimensionless:
                     segment_length = \
                       my_segment["traversal_box_fraction"].in_units("Mpccm / h")
                 else:


https://bitbucket.org/yt_analysis/yt/commits/c55375fd9efc/
Changeset:   c55375fd9efc
Branch:      yt
User:        brittonsmith
Date:        2016-08-27 08:10:36+00:00
Summary:     Make sure ray start/end and left/right edges use the dataset's unit system where proper/comoving conversion is correct.
Affected #:  1 file

diff -r 51652f4ac23479e08ed4926d4fba7fdd56c80cec -r c55375fd9efc7baa3d39586ada642e521608549c yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -517,15 +517,18 @@
             else:
                 next_redshift = my_segment['next']['redshift']
 
+            # Make sure start, end, left, right
+            # are using the dataset's unit system.
+            my_start = ds.arr(my_segment['start'])
+            my_end   = ds.arr(my_segment['end'])
+            my_left  = ds.arr(left_edge)
+            my_right = ds.arr(right_edge)
             mylog.info("Getting segment at z = %s: %s to %s." %
-                       (my_segment['redshift'], my_segment['start'],
-                        my_segment['end']))
+                       (my_segment['redshift'], my_start, my_end))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'],
-                                        my_segment['end'],
-                                        left=left_edge,
-                                        right=right_edge)
+            sub_segments = periodic_ray(my_start, my_end,
+                                        left=my_left, right=my_right)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -598,8 +601,7 @@
             # Get redshift for each lixel.  Assume linear relation between l 
             # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'],
-                                                my_segment['end']).in_cgs())
+                (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 


https://bitbucket.org/yt_analysis/yt/commits/22ae9c82a906/
Changeset:   22ae9c82a906
Branch:      yt
User:        brittonsmith
Date:        2016-08-27 08:16:01+00:00
Summary:     Speed limit for Hubble's law.
Affected #:  1 file

diff -r c55375fd9efc7baa3d39586ada642e521608549c -r 22ae9c82a906043562eb20055385fa81dca964dc yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -320,6 +320,7 @@
         # Use Hubble's law for initial guess
         target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
         v = self.cosmology.hubble_parameter(z) * target_distance
+        v = min(v, 0.9 * c)
         dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
         z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")


https://bitbucket.org/yt_analysis/yt/commits/df408d25ac13/
Changeset:   df408d25ac13
Branch:      yt
User:        brittonsmith
Date:        2016-08-27 17:42:14+00:00
Summary:     Ray init no longer clobbers incoming units.
Affected #:  1 file

diff -r 22ae9c82a906043562eb20055385fa81dca964dc -r df408d25ac13eabc3a105feccb79cfc65dca1ebb yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -197,10 +197,20 @@
     def __init__(self, start_point, end_point, ds=None,
                  field_parameters=None, data_source=None):
         super(YTRay, self).__init__(ds, field_parameters, data_source)
-        self.start_point = self.ds.arr(start_point,
-                            'code_length', dtype='float64')
-        self.end_point = self.ds.arr(end_point,
-                            'code_length', dtype='float64')
+        if isinstance(start_point, YTArray):
+            self.start_point = \
+              self.ds.arr(start_point).to("code_length")
+        else:
+            self.start_point = \
+              self.ds.arr(start_point, 'code_length',
+                          dtype='float64')
+        if isinstance(end_point, YTArray):
+            self.end_point = \
+              self.ds.arr(end_point).to("code_length")
+        else:
+            self.end_point = \
+              self.ds.arr(end_point, 'code_length',
+                          dtype='float64')
         self.vec = self.end_point - self.start_point
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)


https://bitbucket.org/yt_analysis/yt/commits/434a048e99b6/
Changeset:   434a048e99b6
Branch:      yt
User:        brittonsmith
Date:        2016-09-03 06:54:57+00:00
Summary:     Incrementing answer number.
Affected #:  1 file

diff -r df408d25ac13eabc3a105feccb79cfc65dca1ebb -r 434a048e99b65b21e7a2fb62f54e34854d73f574 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,7 +67,7 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_004:
+  local_absorption_spectrum_005:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo


https://bitbucket.org/yt_analysis/yt/commits/4bc8ace93fda/
Changeset:   4bc8ace93fda
Branch:      yt
User:        ngoldbaum
Date:        2016-09-07 18:06:03+00:00
Summary:     Merged in brittonsmith/yt (pull request #2345)

LightRay enhancements and bugfix (closes Issue #1258)
Affected #:  12 files

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
    # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for some distance value,
+   x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+   care to understand which reference frame is correct for the given calculation.
+
+All of the above
 functions accept scalar values and arrays.  The helper functions, `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,13 +49,18 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with
-  ``use_minimum_datasets`` set to False, this parameter specifies the
-  fraction of the total box size to be traversed before rerandomizing the
-  projection axis and center.  This was invented to allow light rays with
-  thin slices to sample coherent large scale structure, but in practice
-  does not work so well.  Try setting this parameter to 1 and see what
-  happens.  Default: 0.0.
+* ``max_box_fraction`` (*float*):  In terms of the size of the domain, the
+  maximum length a light ray segment can be in order to span the redshift interval
+  from one dataset to another.  If using a zoom-in simulation, this parameter can
+  be set to the length of the high resolution region so as to limit ray segments
+  to that size.  If the high resolution region is not cubical, the smallest side
+  should be used.  Default: 1.0 (the size of the box)
+
+* ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum
+  length of a ray, in terms of the size of the domain, before the trajectory
+  is re-randomized.  Set to 0 to have ray trajectory randomized for every
+  dataset.  Set to np.inf (infinity) to use a single trajectory for the
+  entire ray.  Default: 0.0.
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
@@ -67,7 +72,7 @@
 ---------------------
 
 Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay,make_light_ray`
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
 function will trace out the rays in each dataset and collect information for all the
 fields requested.  The output file will be an HDF5 file containing all the
 cell field values for all the cells that were intersected by the ray.  A
@@ -85,6 +90,21 @@
 
 * ``seed`` (*int*): Seed for the random number generator.  Default: None.
 
+* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
+  boundaries.  If False, ray trajectories will not be periodic.  Default : True.
+
+* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
+  region in which rays are to be generated.  If None, the left edge will be
+  that of the domain.  Default: None.
+
+* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
+  the region in which rays are to be generated.  If None, the right edge
+  will be that of the domain.  Default: None.
+
+* ``min_level`` (*int*): The minimum refinement level of the spatial region in
+  which the ray passes.  This can be used with zoom-in simulations where the
+  high resolution region does not keep a constant geometry.  Default: None.
+
 * ``start_position`` (*list* of floats): Used only if creating a light ray
   from a single dataset.  The coordinates of the starting position of the
   ray.  Default: None.
@@ -122,7 +142,82 @@
   slice and 1 to have all processors work together on each projection.
   Default: 1
 
-.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
+Useful Tips for Making LightRays
+--------------------------------
+
+Below are some tips that may come in handy for creating proper LightRays.
+
+How many snapshots do I need?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of snapshots required to traverse some redshift interval depends
+on the simulation box size and cosmological parameters.  Before running an
+expensive simulation only to find out that you don't have enough outputs
+to span the redshift interval you want, have a look at
+:ref:`planning-cosmology-simulations`.  The functionality described there
+will allow you to calculate the precise number of snapshots and specific
+redshifts at which they should be written.
+
+My snapshots are too far apart!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
+allows the user to control how long a ray segment can be for an
+individual dataset.  By default, the `LightRay` generator will try to
+make segments no longer than the size of the box to avoid sampling the
+same structures more than once.  However, this can be increased in the
+case that the redshift interval between datasets is longer than the
+box size.  Increasing this value should be done with caution as longer
+ray segments run a greater risk of coming back to somewhere near their
+original position.
+
+What if I have a zoom-in simulation?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A zoom-in simulation has a high resolution region embedded within a
+larger, low resolution volume.  In this type of simulation, it is likely
+that you will want the ray segments to stay within the high resolution
+region.  To do this, you must first specify the size of the high
+resolution region when creating the `LightRay` using the
+``max_box_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+account the high resolution region and not the full box size.  If your
+high resolution region is not a perfect cube, specify the smallest side.
+Then, in the call to
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
+use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
+precise location of the high resolution region.
+
+Technically speaking, the ray segments should no longer be periodic
+since the high resolution region is only a sub-volume within the
+larger domain.  To make the ray segments non-periodic, set the
+``periodic`` keyword to False.  The LightRay generator will continue
+to generate randomly oriented segments until it finds one that fits
+entirely within the high resolution region.  If you have a high
+resolution region that can move and change shape slightly as structure
+forms, use the `min_level` keyword to mandate that the ray segment only
+pass through cells that are refined to at least some minimum level.
+
+If the size of the high resolution region is not large enough to
+span the required redshift interval, the `LightRay` generator can
+be configured to treat the high resolution region as if it were
+periodic simply by setting the ``periodic`` keyword to True.  This
+option should be used with caution as it will lead to the creation
+of disconnected ray segments within a single dataset.
+
+I want a continuous trajectory over the entire ray.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set the ``minimum_coherent_box_fraction`` keyword argument to a very
+large number, like infinity (`numpy.inf`).
+
+.. note::
+
+   As of :code:`yt-3.0`, the functionality for recording properties of
+   the nearest halo to each element of the ray no longer exists.  This
+   is still available in :code:`yt-2.x`.  If you would like to use this
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -4,7 +4,7 @@
 ===================================================
 
 If you want to run a cosmological simulation that will have just enough data
-outputs to create a cosmology splice, the
+outputs to create a light cone or light ray, the
 :meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
 function will calculate a list of redshift outputs that will minimally
 connect a redshift interval.

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,7 +67,7 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_004:
+  local_absorption_spectrum_005:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -67,7 +69,11 @@
         max_box_fraction : float
             In terms of the size of the domain, the maximum length a light
             ray segment can be in order to span the redshift interval from
-            one dataset to another.
+            one dataset to another.  If using a zoom-in simulation, this
+            parameter can be set to the length of the high resolution
+            region so as to limit ray segments to that size.  If the
+            high resolution region is not cubical, the smallest side
+            should be used.
             Default: 1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
@@ -115,6 +121,7 @@
                 output['next'] = self.splice_outputs[i + 1]
 
         # Calculate maximum delta z for each data dump.
+        self.max_box_fraction = max_box_fraction
         self._calculate_deltaz_max()
 
         # Calculate minimum delta z for each data dump.
@@ -144,7 +151,7 @@
             self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
             cosmology_splice.append(self.splice_outputs[0])
             z = cosmology_splice[-1]["redshift"]
-            z_target = z - max_box_fraction * cosmology_splice[-1]["dz_max"]
+            z_target = z - cosmology_splice[-1]["dz_max"]
 
             # fill redshift space with datasets
             while ((z_target > near_redshift) and
@@ -172,7 +179,7 @@
 
                 cosmology_splice.append(current_slice)
                 z = current_slice["redshift"]
-                z_target = z - max_box_fraction * current_slice["dz_max"]
+                z_target = z - current_slice["dz_max"]
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
@@ -199,8 +206,8 @@
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
         
-        # change the 'next' and 'previous' pointers to point to the correct outputs for the created
-        # splice
+        # change the 'next' and 'previous' pointers to point to the correct outputs
+        # for the created splice
         for i, output in enumerate(cosmology_splice):
             if len(cosmology_splice) == 1:
                 output['previous'] = None
@@ -264,7 +271,8 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
+                                              self.max_box_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -282,72 +290,23 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
+        target_distance = self.simulation.box_size * \
+          self.max_box_fraction
+        for output in self.splice_outputs:
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
 
-        target_distance = self.simulation.box_size
-
-        for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
-            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,10 +316,13 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        v = min(v, 0.9 * c)
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -79,21 +79,23 @@
     max_box_fraction : optional, float
         In terms of the size of the domain, the maximum length a light
         ray segment can be in order to span the redshift interval from
-        one dataset to another.
+        one dataset to another.  If using a zoom-in simulation, this
+        parameter can be set to the length of the high resolution
+        region so as to limit ray segments to that size.  If the
+        high resolution region is not cubical, the smallest side
+        should be used.
         Default: 1.0 (the size of the box)
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
         datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
-        Used with use_minimum_datasets set to False, this parameter
-        specifies the fraction of the total box size to be traversed
-        before rerandomizing the projection axis and center.  This
-        was invented to allow light rays with thin slices to sample
-        coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.  
-        Do not use for simple rays.
-        Default: 0.0.
+        Use to specify the minimum length of a ray, in terms of the
+        size of the domain, before the trajectory is re-randomized.
+        Set to 0 to have ray trajectory randomized for every dataset.
+        Set to np.inf (infinity) to use a single trajectory for the
+        entire ray.
+        Default: 0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -123,6 +125,11 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
+        if near_redshift is not None and far_redshift is not None and \
+          near_redshift >= far_redshift:
+            raise RuntimeError(
+                "near_redshift must be less than far_redshift.")
+
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.use_minimum_datasets = use_minimum_datasets
@@ -154,8 +161,7 @@
                 self.cosmology = Cosmology(
                     hubble_constant=self.ds.hubble_constant,
                     omega_matter=self.ds.omega_matter,
-                    omega_lambda=self.ds.omega_lambda,
-                    unit_registry=self.ds.unit_registry)
+                    omega_lambda=self.ds.omega_lambda)
             else:
                 redshift = 0.
             self.light_ray_solution.append({"filename": self.parameter_filename,
@@ -169,20 +175,23 @@
             CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
-              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
-                                           minimal=self.use_minimum_datasets,
-                                           max_box_fraction=max_box_fraction,
-                                           deltaz_min=self.deltaz_min,
-                                           time_data=time_data,
-                                           redshift_data=redshift_data)
+              self.create_cosmology_splice(
+                  self.near_redshift, self.far_redshift,
+                  minimal=self.use_minimum_datasets,
+                  max_box_fraction=max_box_fraction,
+                  deltaz_min=self.deltaz_min,
+                  time_data=time_data,
+                  redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
+                                      left_edge=None, right_edge=None,
+                                      min_level=None, periodic=True,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        np.random.seed(seed)
+        my_random = np.random.RandomState(seed)
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
@@ -192,9 +201,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.asarray(start_position)
+            self.light_ray_solution[0]['start'] = start_position
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.asarray(end_position)
+                self.light_ray_solution[0]['end'] = end_position
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -228,29 +237,40 @@
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
-                # enough of the box has been used,
-                # or if box_fraction_used will be greater than 1 after this slice.
-                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                        (box_fraction_used >
-                         self.minimum_coherent_box_fraction) or \
-                        (box_fraction_used +
-                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                    # Random start point
-                    self.light_ray_solution[q]['start'] = np.random.random(3)
-                    theta = np.pi * np.random.random()
-                    phi = 2 * np.pi * np.random.random()
-                    box_fraction_used = 0.0
+                # enough of the box has been used.
+                if (q == 0) or (box_fraction_used >=
+                                self.minimum_coherent_box_fraction):
+                    if periodic:
+                        self.light_ray_solution[q]['start'] = left_edge + \
+                          (right_edge - left_edge) * my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
+                        box_fraction_used = 0.0
+                    else:
+                        ds = load(self.light_ray_solution[q]["filename"])
+                        ray_length = \
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
+                                  "unitary")
+                        self.light_ray_solution[q]['start'], \
+                          self.light_ray_solution[q]['end'] = \
+                          non_periodic_ray(ds, left_edge, right_edge, ray_length,
+                                           my_random=my_random, min_level=min_level)
+                        del ds
                 else:
-                    # Use end point of previous segment and same theta and phi.
+                    # Use end point of previous segment, adjusted for periodicity,
+                    # and the same trajectory.
                     self.light_ray_solution[q]['start'] = \
-                      self.light_ray_solution[q-1]['end'][:]
+                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
+                                      left=left_edge, right=right_edge)
 
-                self.light_ray_solution[q]['end'] = \
-                  self.light_ray_solution[q]['start'] + \
-                    self.light_ray_solution[q]['traversal_box_fraction'] * \
-                    np.array([np.cos(phi) * np.sin(theta),
-                              np.sin(phi) * np.sin(theta),
-                              np.cos(theta)])
+                if "end" not in self.light_ray_solution[q]:
+                    self.light_ray_solution[q]['end'] = \
+                      self.light_ray_solution[q]['start'] + \
+                        self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        self.simulation.box_size * \
+                        np.array([np.cos(phi) * np.sin(theta),
+                                  np.sin(phi) * np.sin(theta),
+                                  np.cos(theta)])
                 box_fraction_used += \
                   self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -261,15 +281,18 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None,
+    def make_light_ray(self, seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=None, use_peculiar_velocity=True, 
-                       redshift=None, njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True,
+                       redshift=None, field_parameters=None, njobs=-1):
         """
-        make_light_ray(seed=None, start_position=None, end_position=None,
+        make_light_ray(seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
+                       start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        use_peculiar_velocity=True, redshift=None,
@@ -285,6 +308,29 @@
         seed : optional, int
             Seed for the random number generator.
             Default: None.
+        periodic : optional, bool
+            If True, ray trajectories will make use of periodic
+            boundaries.  If False, ray trajectories will not be
+            periodic.
+            Default : True.
+        left_edge : optional, iterable of floats or YTArray
+            The left corner of the region in which rays are to be
+            generated.  If None, the left edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        right_edge : optional, iterable of floats or YTArray
+            The right corner of the region in which rays are to be
+            generated.  If None, the right edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        min_level : optional, int
+            The minimum refinement level of the spatial region in which
+            the ray passes.  This can be used with zoom-in simulations
+            where the high resolution region does not keep a constant
+            geometry.
+            Default: None.
         start_position : optional, iterable of floats or YTArray.
             Used only if creating a light ray from a single dataset.
             The coordinates of the starting position of the ray.
@@ -363,30 +409,56 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if self.simulation_type is None:
+            domain = self.ds
+        else:
+            domain = self.simulation
 
-        if start_position is not None and hasattr(start_position, 'units'):
-            start_position = start_position.to('unitary')
-        elif start_position is not None :
-            start_position = self.ds.arr(
-                start_position, 'code_length').to('unitary')
+        assumed_units = "code_length"
+        if left_edge is None:
+            left_edge = domain.domain_left_edge
+        elif not hasattr(left_edge, 'units'):
+            left_edge = domain.arr(left_edge, assumed_units)
+        left_edge.convert_to_units('unitary')
 
-        if end_position is not None and hasattr(end_position, 'units'):
-            end_position = end_position.to('unitary')
-        elif end_position is not None :
-            end_position = self.ds.arr(
-                end_position, 'code_length').to('unitary')
+        if right_edge is None:
+            right_edge = domain.domain_right_edge
+        elif not hasattr(right_edge, 'units'):
+            right_edge = domain.arr(right_edge, assumed_units)
+        right_edge.convert_to_units('unitary')
+
+        if start_position is not None:
+            if hasattr(start_position, 'units'):
+                start_position = start_position
+            else:
+                start_position = self.ds.arr(start_position, assumed_units)
+            start_position.convert_to_units('unitary')
+
+        if end_position is not None:
+            if hasattr(end_position, 'units'):
+                end_position = end_position
+            else:
+                end_position = self.ds.arr(end_position, assumed_units)
+            end_position.convert_to_units('unitary')
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
-            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
+            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
+                       "Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
+                                           left_edge=left_edge,
+                                           right_edge=right_edge,
+                                           min_level=min_level, periodic=periodic,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
 
+        if field_parameters is None:
+            field_parameters = {}
+
         # Initialize data structures.
         self._data = {}
         # temperature field is automatically added to fields
@@ -427,19 +499,11 @@
             if setup_function is not None:
                 setup_function(ds)
 
-            if start_position is not None:
-                my_segment["start"] = ds.arr(my_segment["start"], "unitary")
-                my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-            else:
-                my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                  ds.domain_left_edge
-                my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                  ds.domain_left_edge
-
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
-                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
+                  not my_segment["traversal_box_fraction"].units.is_dimensionless:
                     segment_length = \
                       my_segment["traversal_box_fraction"].in_units("Mpccm / h")
                 else:
@@ -453,18 +517,18 @@
             else:
                 next_redshift = my_segment['next']['redshift']
 
+            # Make sure start, end, left, right
+            # are using the dataset's unit system.
+            my_start = ds.arr(my_segment['start'])
+            my_end   = ds.arr(my_segment['end'])
+            my_left  = ds.arr(left_edge)
+            my_right = ds.arr(right_edge)
             mylog.info("Getting segment at z = %s: %s to %s." %
-                       (my_segment['redshift'], my_segment['start'],
-                        my_segment['end']))
-
-            # Convert segment units from unitary to code length for sub_ray
-            my_segment['start'] = my_segment['start'].to('code_length')
-            my_segment['end'] = my_segment['end'].to('code_length')
+                       (my_segment['redshift'], my_start, my_end))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=ds.domain_left_edge,
-                                        right=ds.domain_right_edge)
+            sub_segments = periodic_ray(my_start, my_end,
+                                        left=my_left, right=my_right)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -477,6 +541,8 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = ds.ray(sub_segment[0], sub_segment[1])
+                for key, val in field_parameters.items():
+                    sub_ray.set_field_parameter(key, val)
                 asort = np.argsort(sub_ray["t"])
                 sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,
@@ -515,7 +581,7 @@
                     # sight) and the velocity vectors: a dot b = ab cos(theta)
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
-                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
+                    # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
@@ -535,8 +601,7 @@
             # Get redshift for each lixel.  Assume linear relation between l 
             # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'],
-                                                my_segment['end']).in_cgs())
+                (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
@@ -672,6 +737,22 @@
 
     return np.sqrt(np.power((end - start), 2).sum())
 
+def periodic_adjust(p, left=None, right=None):
+    """
+    Return the point p adjusted for periodic boundaries.
+
+    """
+    if isinstance(p, YTArray):
+        p.convert_to_units("unitary")
+    if left is None:
+        left = np.zeros_like(p)
+    if right is None:
+        right = np.ones_like(p)
+
+    w = right - left
+    p -= left
+    return np.mod(p, w)
+
 def periodic_distance(coord1, coord2):
     """
     periodic_distance(coord1, coord2)
@@ -713,7 +794,7 @@
     dim = right - left
 
     vector = end - start
-    wall = np.zeros(start.shape)
+    wall = np.zeros_like(start)
     close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
@@ -733,7 +814,6 @@
     this_end = end.copy()
     t = 0.0
     tolerance = 1e-6
-
     while t < 1.0 - tolerance:
         hit_left = (this_start <= left) & (vector < 0)
         if (hit_left).any():
@@ -751,8 +831,44 @@
         now = this_start + vector * dt
         close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([np.copy(this_start), np.copy(now)])
+        segments.append([this_start.copy(), now.copy()])
         this_start = now.copy()
         t += dt
 
     return segments
+
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
+                     min_level=None, my_random=None):
+
+    max_length = vector_length(left_edge, right_edge)
+    if ray_length > max_length:
+        raise RuntimeError(
+            ("The maximum segment length in the region %s to %s is %s, " +
+             "but the ray length requested is %s.  Decrease ray length.") %
+             (left_edge, right_edge, max_length, ray_length))
+
+    if my_random is None:
+        my_random = np.random.RandomState()
+    i = 0
+    while True:
+        start = my_random.random_sample(3) * \
+          (right_edge - left_edge) + left_edge
+        theta = np.pi * my_random.random_sample()
+        phi = 2 * np.pi * my_random.random_sample()
+        end = start + ray_length * \
+          np.array([np.cos(phi) * np.sin(theta),
+                    np.sin(phi) * np.sin(theta),
+                    np.cos(theta)])
+        i += 1
+        test_ray = ds.ray(start, end)
+        if (end >= left_edge).all() and (end <= right_edge).all() and \
+          (min_level is None or min_level <= 0 or
+           (test_ray["grid_level"] >= min_level).all()):
+            mylog.info("Found ray after %d attempts." % i)
+            del test_ray
+            return start, end
+        del test_ray
+        if i > max_iter:
+            raise RuntimeError(
+                ("Failed to create segment in %d attempts.  " +
+                 "Decreasing ray length is recommended") % i)

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -10,6 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.testing import \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
@@ -41,6 +43,48 @@
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
+ at requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nested():
+    """
+    This test generates a cosmological light ray confining the ray to a subvolume
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    left = np.ones(3) * 0.25
+    right = np.ones(3) * 0.75
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nonperiodic():
+    """
+    This test generates a cosmological light ray using non-periodic segments
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, periodic=False,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
 def test_light_ray_non_cosmo():

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -197,10 +197,20 @@
     def __init__(self, start_point, end_point, ds=None,
                  field_parameters=None, data_source=None):
         super(YTRay, self).__init__(ds, field_parameters, data_source)
-        self.start_point = self.ds.arr(start_point,
-                            'code_length', dtype='float64')
-        self.end_point = self.ds.arr(end_point,
-                            'code_length', dtype='float64')
+        if isinstance(start_point, YTArray):
+            self.start_point = \
+              self.ds.arr(start_point).to("code_length")
+        else:
+            self.start_point = \
+              self.ds.arr(start_point, 'code_length',
+                          dtype='float64')
+        if isinstance(end_point, YTArray):
+            self.end_point = \
+              self.ds.arr(end_point).to("code_length")
+        else:
+            self.end_point = \
+              self.ds.arr(end_point, 'code_length',
+                          dtype='float64')
         self.vec = self.end_point - self.start_point
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -868,8 +868,7 @@
             self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
-                              omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry)
+                              omega_lambda=self.omega_lambda)
             self.critical_density = \
                     self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -110,6 +110,8 @@
         self.domain_right_edge = self.domain_right_edge * self.length_unit
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_length", self.length_unit)
+        self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                               self.length_unit.units.dimensions)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -102,6 +102,8 @@
             self.box_size = self.box_size * self.length_unit
             self.domain_left_edge = self.domain_left_edge * self.length_unit
             self.domain_right_edge = self.domain_right_edge * self.length_unit
+            self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                                   self.length_unit.units.dimensions)
         else:
             # Read time from file for non-cosmological sim
             self.time_unit = self.quan(

diff -r 9ccda6b503c68b4944523f4dc8afb5dbbfb94b1e -r 4bc8ace93fda81e800a2e90c052c431c2d48a01b yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -33,7 +33,14 @@
 
     For an explanation of the various cosmological measures, see, for example 
     Hogg (1999, http://xxx.lanl.gov/abs/astro-ph/9905116).
-    
+
+    WARNING: Cosmological distance calculations return values that are either
+    in the comoving or proper frame, depending on the specific quantity.  For
+    simplicity, the proper and comoving frames are set equal to each other
+    within the cosmology calculator.  This means that for some distance value,
+    x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+    care to understand which reference frame is correct for the given calculation.
+
     Parameters
     ----------
     hubble_constant : float
@@ -58,7 +65,7 @@
     >>> from yt.utilities.cosmology import Cosmology
     >>> co = Cosmology()
     >>> print(co.hubble_time(0.0).in_units("Gyr"))
-    
+
     """
     def __init__(self, hubble_constant = 0.71,
                  omega_matter = 0.27,
@@ -66,9 +73,9 @@
                  omega_curvature = 0.0,
                  unit_registry = None,
                  unit_system = "cgs"):
-        self.omega_matter = omega_matter
-        self.omega_lambda = omega_lambda
-        self.omega_curvature = omega_curvature
+        self.omega_matter = float(omega_matter)
+        self.omega_lambda = float(omega_lambda)
+        self.omega_curvature = float(omega_curvature)
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list