[yt-svn] commit/yt: 26 new changesets

commits-noreply at bitbucket.org
Mon Jul 14 10:58:35 PDT 2014


26 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/59a415b02958/
Changeset:   59a415b02958
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-06-21 12:55:30
Summary:     Adding comoving units and modifying h in the unit registry in the cosmology calculator.
Affected #:  1 file

diff -r 3e627557e0bb1f730575c4990e6fc0f940e7a147 -r 59a415b02958d69ba94ce9fae13eef0d8d3b6f16 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -16,6 +16,7 @@
 import functools
 import numpy as np
 
+from yt.units import dimensions
 from yt.units.unit_registry import \
      UnitRegistry
 from yt.units.yt_array import \
@@ -67,6 +68,12 @@
         if unit_registry is None:
             unit_registry = UnitRegistry()
         self.unit_registry = unit_registry
+        self.unit_registry.modify("h", hubble_constant)
+        for my_unit in ["m", "pc", "AU", "au"]:
+            new_unit = "%scm" % my_unit
+            # technically not true, but distances here are actually comoving
+            self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0],
+                                   dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
         self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc")
 
     def hubble_distance(self):
@@ -74,7 +81,7 @@
         The distance corresponding to c / h, where c is the speed of light 
         and h is the Hubble parameter in units of 1 / time.
         """
-        return (speed_of_light_cgs / self.hubble_constant).in_cgs()
+        return self.quan((speed_of_light_cgs / self.hubble_constant)).in_cgs()
 
     def comoving_radial_distance(self, z_i, z_f):
         r"""

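For context, a minimal sketch of what this changeset enables, assuming
the constructor signature shown in the diff above (the printed unit
string is an illustration, not verified output):

from yt.utilities.cosmology import Cosmology

# A sketch: the calculator's unit registry now carries comoving
# lengths ("pccm" and friends, so prefixed forms like "Mpccm" work)
# and an "h" symbol set to the supplied hubble_constant, so results
# can be asked for directly in comoving little-h units.
co = Cosmology(hubble_constant=0.7)
print(co.hubble_distance().in_units("Mpccm / h"))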

https://bitbucket.org/yt_analysis/yt/commits/5661f216d6fc/
Changeset:   5661f216d6fc
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-06-21 21:45:07
Summary:     Updating cosmology splice and EnzoSimulation.
Affected #:  2 files

diff -r 59a415b02958d69ba94ce9fae13eef0d8d3b6f16 -r 5661f216d6fc30097bb8fe64708623456443cebd yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -78,8 +78,9 @@
 
         Examples
         --------
-        >>> cosmo = es.create_cosmology_splice(1.0, 0.0, minimal=True,
-                                               deltaz_min=0.0)
+
+        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
+        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)
 
         """
 
@@ -133,12 +134,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (np.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.abs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.abs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -153,20 +154,20 @@
 
                     if current_slice is cosmology_splice[-1]:
                         near_redshift = cosmology_splice[-1]['redshift'] - \
-                          cosmology_splice[-1]['deltazMax']
+                          cosmology_splice[-1]['dz_max']
                         mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                         break
                     else:
                         cosmology_splice.append(current_slice)
 
                 z = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
-                                                                    obj['redshift']))
+            self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
+                                                           obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
 
@@ -175,14 +176,14 @@
                 if (nextOutput['redshift'] <= near_redshift):
                     break
                 if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
-                    cosmology_splice[-1]['deltazMin']):
+                    cosmology_splice[-1]['dz_min']):
                     cosmology_splice.append(nextOutput)
                 nextOutput = nextOutput['next']
             if (cosmology_splice[-1]['redshift'] -
-                cosmology_splice[-1]['deltazMax']) > near_redshift:
+                cosmology_splice[-1]['dz_max']) > near_redshift:
                 mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                 near_redshift = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
@@ -253,7 +254,7 @@
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
-            outputs.append({'redshift': z, 'deltazMax': deltaz_max})
+            outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
         mylog.info("%d data dumps will be needed to get from z = %f to %f." %
@@ -282,28 +283,24 @@
             # at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.1 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
-            output['deltazMax'] = np.fabs(z2 - z)
-
+            output['dz_max'] = np.abs(z2 - z)
+            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
@@ -322,28 +319,24 @@
             # top grid pixel at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.01 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
+            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,24 +350,20 @@
         # box at a given redshift.
         z1 = z
         z2 = z1 - 0.1 # just an initial guess
-        distance1 = 0.0
+        distance1 = self.simulation.quan(0.0, "Mpccm / h")
+        distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1
 
-        # Convert comoving radial distance into Mpc / h,
-        # since that's how box size is stored.
-        distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-          self.cosmology.hubble_constant
-
-        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
-            z2 = ((target_distance - distance2) / m) + z2
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.cosmology.hubble_constant
+            z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration += 1
             if (iteration > max_Iterations):
-                mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, np.fabs(z2 - z)))
+                mylog.error("deltaz_forward: Warning - max iterations " +
+                            "exceeded for z = %f (delta z = %f)." %
+                            (z, np.abs(z2 - z)))
                 break
-        return np.fabs(z2 - z)
+        return np.abs(z2 - z)

diff -r 59a415b02958d69ba94ce9fae13eef0d8d3b6f16 -r 5661f216d6fc30097bb8fe64708623456443cebd yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -95,6 +95,16 @@
                             unit_registry=self.unit_registry)
 
             self.time_unit = self.enzo_cosmology.time_unit.in_units("s")
+            self.unit_registry.modify("h", self.hubble_constant)
+            # Comoving lengths
+            for my_unit in ["m", "pc", "AU", "au"]:
+                new_unit = "%scm" % my_unit
+                # technically not true, but should be ok
+                self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0],
+                                       dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
+            self.length_unit = self.quan(self.box_size, "Mpccm / h",
+                                         registry=self.unit_registry)
+            self.box_size = self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)

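All three delta-z helpers above share the same secant iteration, now
carried out in unit-aware "Mpccm / h" quantities rather than stripping
the hubble constant by hand.  A standalone sketch of the pattern, with
a plain callable standing in for Cosmology.comoving_radial_distance
(the tolerances and the toy metric below are assumptions):

import numpy as np

def deltaz_forward(z, target_distance, distance,
                   d_tolerance=1e-4, max_iterations=100):
    # Secant iteration: find dz such that
    # distance(z - dz, z) == target_distance.
    z1, z2 = z, z - 0.1  # initial guess, as in the diff above
    d1, d2 = 0.0, distance(z2, z)
    iteration = 1
    while np.abs(d2 - target_distance) / d2 > d_tolerance:
        m = (d2 - d1) / (z2 - z1)  # local slope of distance vs. redshift
        z1, d1 = z2, d2
        z2 = (target_distance - d2) / m + z2
        d2 = distance(z2, z)
        iteration += 1
        if iteration > max_iterations:
            break
    return np.abs(z2 - z)

# e.g., with a linear toy metric of 3000 distance units per unit redshift:
print(deltaz_forward(1.0, 50.0, lambda za, zb: 3000.0 * abs(zb - za)))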

https://bitbucket.org/yt_analysis/yt/commits/c7769ba6618f/
Changeset:   c7769ba6618f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-06-22 23:29:32
Summary:     Updating the light ray tool for units and removing all of the halo profiler extensions, since the halo profiler no longer exists.
Affected #:  1 file

diff -r 5661f216d6fc30097bb8fe64708623456443cebd -r c7769ba6618ff0bbc7c89cb42c1027e8e5ea80b5 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,8 +21,6 @@
 
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
      CosmologySplice
-from yt.analysis_modules.halo_profiler.multi_halo_profiler import \
-     HaloProfiler
 from yt.convenience import load
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
@@ -102,7 +100,6 @@
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
 
         self.light_ray_solution = []
-        self.halo_lists = {}
         self._data = {}
 
         # Get list of datasets for light ray solution.
@@ -126,9 +123,11 @@
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
             if len(self.light_ray_solution) > 1:
-                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+                raise RuntimeError("LightRay Error: cannot specify start_position " + \
+                                   "if light ray uses more than one dataset.")
             if not ((end_position is None) ^ (trajectory is None)):
-                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+                raise RuntimeError("LightRay Error: must specify either end_position " + \
+                                   "or trajectory, but not both.")
             self.light_ray_solution[0]['start'] = np.array(start_position)
             if end_position is not None:
                 self.light_ray_solution[0]['end'] = np.array(end_position)
@@ -159,10 +158,9 @@
 
                 # Calculate fraction of box required for a depth of delta z
                 self.light_ray_solution[q]['traversal_box_fraction'] = \
-                    self.cosmology.ComovingRadialDistance(\
-                    z_next, self.light_ray_solution[q]['redshift']) * \
-                    self.simulation.hubble_constant / \
-                    self.simulation.box_size
+                    self.cosmology.comoving_radial_distance(z_next, \
+                        self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
+                        self.simulation.box_size
 
                 # Simple error check to make sure more than 100% of box depth
                 # is never required.
@@ -214,12 +212,8 @@
                        trajectory=None,
                        fields=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=False,
-                       get_nearest_halo=False,
-                       nearest_halo_fields=None,
-                       halo_list_file=None,
-                       halo_profiler_parameters=None,
-                       njobs=1, dynamic=False):
+                       get_los_velocity=True,
+                       njobs=-1):
         """
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
@@ -257,108 +251,29 @@
         get_los_velocity : bool
             If True, the line of sight velocity is calculated for
             each point in the ray.
-            Default: False.
-        get_nearest_halo : bool
-            If True, the HaloProfiler will be used to calculate the
-            distance and mass of the nearest halo for each point in the
-            ray.  This option requires additional information to be
-            included.  See below for an example.
-            Default: False.
-        nearest_halo_fields : list
-            A list of fields to be calculated for the halos nearest to
-            every lixel in the ray.
-            Default: None.
-        halo_list_file : str
-            Filename containing a list of halo properties to be used 
-            for getting the nearest halos to absorbers.
-            Default: None.
-        halo_profiler_parameters: dict
-            A dictionary of parameters to be passed to the HaloProfiler
-            to create the appropriate data used to get properties for
-            the nearest halos.
-            Default: None.
+            Default: True.
         njobs : int
-            The number of parallel jobs over which the slices for the
-            halo mask will be split.  Choose -1 for one processor per
-            individual slice and 1 to have all processors work together
-            on each projection.
-            Default: 1
-        dynamic : bool
-            If True, use dynamic load balancing to create the projections.
-            Default: False.
-
-        Notes
-        -----
-
-        The light ray tool will use the HaloProfiler to calculate the
-        distance and mass of the nearest halo to that pixel.  In order
-        to do this, a dictionary called halo_profiler_parameters is used
-        to pass instructions to the HaloProfiler.  This dictionary has
-        three additional keywords.
-
-        halo_profiler_kwargs : dict
-            A dictionary of standard HaloProfiler keyword
-            arguments and values to be given to the HaloProfiler.
-
-        halo_profiler_actions : list
-            A list of actions to be performed by the HaloProfiler.
-            Each item in the list should be a dictionary with the following
-            entries: "function", "args", and "kwargs", for the function to
-            be performed, the arguments supplied to that function, and the
-            keyword arguments.
-
-        halo_list : string
-            'all' to use the full halo list, or 'filtered' to use the
-            filtered halo list created after calling make_profiles.
+            The number of parallel jobs over which the segments will 
+            be split.  Choose -1 for one processor per segment.
+            Default: -1.
 
         Examples
         --------
 
         >>> from yt.mods import *
-        >>> from yt.analysis_modules.halo_profiler.api import *
-        >>> from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
-        >>> halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
-        >>> halo_profiler_actions = []
-        >>> # Add a virial filter.
-        >>> halo_profiler_actions.append({'function': add_halo_filter,
-        ...                           'args': VirialFilter,
-        ...                           'kwargs': {'overdensity_field': 'ActualOverdensity',
-        ...                                      'virial_overdensity': 200,
-        ...                                      'virial_filters': [['TotalMassMsun','>=','1e14']],
-        ...                                      'virial_quantities': ['TotalMassMsun','RadiusMpc']}})
-        ...
-        >>> # Make the profiles.
-        >>> halo_profiler_actions.append({'function': make_profiles,
-        ...                           'args': None,
-        ...                           'kwargs': {'filename': 'VirializedHalos.h5'}})
-        ...
-        >>> halo_list = 'filtered'
-        >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
-        ...                             halo_profiler_actions=halo_profiler_actions,
-        ...                             halo_list=halo_list)
-        ...
-        >>> my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
-        ...                use_minimum_datasets=True,
-        ...                time_data=False)
+        >>> from yt.analysis_modules.cosmological_analysis.light_ray.api import \
+        ...     LightRay
+        >>> my_ray = LightRay('enzo_tiny_simulation/32Mpc_32.enzo', 'Enzo',
+        ...                   0., 0.1, time_data=False)
         ...
         >>> my_ray.make_light_ray(seed=12345,
-        ...                   solution_filename='solution.txt',
-        ...                   data_filename='my_ray.h5',
-        ...                   fields=['Temperature', 'Density'],
-        ...                   get_nearest_halo=True,
-        ...                   nearest_halo_fields=['TotalMassMsun_100',
-        ...                                        'RadiusMpc_100'],
-        ...                   halo_list_file='VirializedHalos.h5',
-        ...                   halo_profiler_parameters=halo_profiler_parameters,
-        ...                   get_los_velocity=True)
+        ...                       solution_filename='solution.txt',
+        ...                       data_filename='my_ray.h5',
+        ...                       fields=['temperature', 'density'],
+        ...                       get_los_velocity=True)
         
         """
 
-        if halo_profiler_parameters is None:
-            halo_profiler_parameters = {}
-        if nearest_halo_fields is None:
-            nearest_halo_fields = []
-
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed, 
                                            start_position=start_position, 
@@ -372,11 +287,6 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
-        if get_nearest_halo:
-            all_fields.extend(['x', 'y', 'z', 'nearest_halo'])
-            all_fields.extend(['nearest_halo_%s' % field \
-                               for field in nearest_halo_fields])
-            data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['x-velocity', 'y-velocity',
                                'z-velocity', 'los_velocity'])
@@ -385,18 +295,20 @@
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
-                                                       njobs=njobs, dynamic=dynamic):
+                                                       njobs=njobs):
 
             # Load dataset for segment.
             pf = load(my_segment['filename'])
+            my_segment["start"] = pf.domain_width * my_segment["start"] +\
+              pf.domain_left_edge
+            my_segment["end"] = pf.domain_width * my_segment["end"] +\
+              pf.domain_left_edge
 
             if self.near_redshift == self.far_redshift:
-                h_vel = cm_per_km * pf.units['mpc'] * \
-                  vector_length(my_segment['start'], my_segment['end']) * \
-                  self.cosmology.HubbleConstantNow * \
-                  self.cosmology.ExpansionFactor(my_segment['redshift'])
-                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
-                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+                next_redshift = my_segment["redshift"] - \
+                  self._deltaz_forward(my_segment["redshift"], 
+                                       pf.domain_width[0].in_units("Mpccm / h") *
+                                       my_segment["traversal_box_fraction"])
             elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
@@ -407,56 +319,49 @@
                         my_segment['end']))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
+            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
+                                        left=pf.domain_left_edge,
+                                        right=pf.domain_right_edge)
 
             # Prepare data structure for subsegment.
             sub_data = {}
             sub_data['segment_redshift'] = my_segment['redshift']
             for field in all_fields:
-                sub_data[field] = np.array([])
+                sub_data[field] = []
 
             # Get data for all subsegments in segment.
             for sub_segment in sub_segments:
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'] = np.concatenate([sub_data['dl'],
-                                                 (sub_ray['dts'] *
-                                                  vector_length(sub_segment[0],
-                                                                sub_segment[1]))])
+                sub_data['dl'].extend(sub_ray['dts'] *
+                                      vector_length(sub_ray.start_point,
+                                                    sub_ray.end_point))
                 for field in data_fields:
-                    sub_data[field] = np.concatenate([sub_data[field],
-                                                      (sub_ray[field])])
+                    sub_data[field].extend(sub_ray[field])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = np.array([sub_ray['x-velocity'],
-                                        sub_ray['y-velocity'],
-                                        sub_ray['z-velocity']])
-                    sub_data['los_velocity'] = \
-                      np.concatenate([sub_data['los_velocity'],
-                                      (np.rollaxis(sub_vel, 1) *
-                                       line_of_sight).sum(axis=1)])
+                    sub_vel = pf.arr([sub_ray['x-velocity'],
+                                      sub_ray['y-velocity'],
+                                      sub_ray['z-velocity']])
+                    sub_data['los_velocity'].extend((np.rollaxis(sub_vel, 1) *
+                                                     line_of_sight).sum(axis=1))
                     del sub_vel
 
                 sub_ray.clear_data()
                 del sub_ray
 
+            for key in sub_data:
+                sub_data[key] = pf.arr(sub_data[key]).in_cgs()
+
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], my_segment['end']))
-            sub_data['redshift'] = my_segment['redshift'] \
-                - sub_data['dredshift'].cumsum() + sub_data['dredshift']
-
-            # Calculate distance to nearest object on halo list for each lixel.
-            if get_nearest_halo:
-                halo_list = self._get_halo_list(pf, fields=nearest_halo_fields,
-                                                filename=halo_list_file,
-                                                **halo_profiler_parameters)
-                sub_data.update(self._get_nearest_halo_properties(sub_data, halo_list,
-                                fields=nearest_halo_fields))
-                sub_data['nearest_halo'] *= pf.units['mpccm']
+                (sub_data['dl'] / vector_length(my_segment['start'], 
+                                                my_segment['end']).in_cgs())
+            sub_data['redshift'] = my_segment['redshift'] - \
+              sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
@@ -464,13 +369,9 @@
                 sub_data[field] = sub_data[field][sub_dl_nonzero]
             del sub_dl_nonzero
 
-            # Convert dl to cm.
-            sub_data['dl'] *= pf.units['cm']
-
             # Add to storage.
             my_storage.result = sub_data
 
-            pf.h.clear_all_data()
             del pf
 
         # Reconstruct ray data from parallel_objects storage.
@@ -488,98 +389,6 @@
         self._data = all_data
         return all_data
 
-    def _get_halo_list(self, pf, fields=None, filename=None, 
-                       halo_profiler_kwargs=None,
-                       halo_profiler_actions=None, halo_list='all'):
-        "Load a list of halos for the pf."
-
-        if str(pf) in self.halo_lists:
-            return self.halo_lists[str(pf)]
-
-        if fields is None: fields = []
-
-        if filename is not None and \
-                os.path.exists(os.path.join(pf.fullpath, filename)):
-
-            my_filename = os.path.join(pf.fullpath, filename)
-            mylog.info("Loading halo list from %s." % my_filename)
-            my_list = {}
-            in_file = h5py.File(my_filename, 'r')
-            for field in fields + ['center']:
-                my_list[field] = in_file[field][:]
-            in_file.close()
-
-        else:
-            my_list = self._halo_profiler_list(pf, fields=fields,
-                                               halo_profiler_kwargs=halo_profiler_kwargs,
-                                               halo_profiler_actions=halo_profiler_actions,
-                                               halo_list=halo_list)
-
-        self.halo_lists[str(pf)] = my_list
-        return self.halo_lists[str(pf)]
-
-    def _halo_profiler_list(self, pf, fields=None, 
-                            halo_profiler_kwargs=None,
-                            halo_profiler_actions=None, halo_list='all'):
-        "Run the HaloProfiler to get the halo list."
-
-        if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
-        if halo_profiler_actions is None: halo_profiler_actions = []
-
-        hp = HaloProfiler(pf, **halo_profiler_kwargs)
-        for action in halo_profiler_actions:
-            if not action.has_key('args'): action['args'] = ()
-            if not action.has_key('kwargs'): action['kwargs'] = {}
-            action['function'](hp, *action['args'], **action['kwargs'])
-
-        if halo_list == 'all':
-            hp_list = copy.deepcopy(hp.all_halos)
-        elif halo_list == 'filtered':
-            hp_list = copy.deepcopy(hp.filtered_halos)
-        else:
-            mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-            hp_list = None
-
-        del hp
-
-        # Create position array from halo list.
-        return_list = dict([(field, []) for field in fields + ['center']])
-        for halo in hp_list:
-            for field in fields + ['center']:
-                return_list[field].append(halo[field])
-        for field in fields + ['center']:
-            return_list[field] = np.array(return_list[field])
-        return return_list
-        
-    def _get_nearest_halo_properties(self, data, halo_list, fields=None):
-        """
-        Calculate distance to nearest object in halo list for each lixel in data.
-        Return list of distances and other properties of nearest objects.
-        """
-
-        if fields is None: fields = []
-        field_data = dict([(field, np.zeros_like(data['x'])) \
-                           for field in fields])
-        nearest_distance = np.zeros_like(data['x'])
-
-        if halo_list['center'].size > 0:
-            for index in xrange(nearest_distance.size):
-                nearest = np.argmin(periodic_distance(np.array([data['x'][index],
-                                                                data['y'][index],
-                                                                data['z'][index]]),
-                                                      halo_list['center']))
-                nearest_distance[index] = periodic_distance(np.array([data['x'][index],
-                                                                      data['y'][index],
-                                                                      data['z'][index]]),
-                                                            halo_list['center'][nearest])
-                for field in fields:
-                    field_data[field][index] = halo_list[field][nearest]
-
-        return_data = {'nearest_halo': nearest_distance}
-        for field in fields:
-            return_data['nearest_halo_%s' % field] = field_data[field]
-        return return_data
-
     @parallel_root_only
     def _write_light_ray(self, filename, data):
         "Write light ray data to hdf5 file."
@@ -588,6 +397,7 @@
         output = h5py.File(filename, 'w')
         for field in data.keys():
             output.create_dataset(field, data=data[field])
+            output[field].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
@@ -599,13 +409,14 @@
         if extra_info is not None:
             for par, val in extra_info.items():
                 f.write("%s = %s\n" % (par, val))
-        f.write("\nSegment Redshift dl/box    Start x       y             z             End x         y             z            Dataset\n")
+        f.write("\nSegment Redshift dl/box    Start x       y             " + \
+                "z             End x         y             z            Dataset\n")
         for q, my_segment in enumerate(self.light_ray_solution):
             f.write("%04d    %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \
-                        (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
-                         my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
-                         my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
-                         my_segment['filename']))
+                    (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
+                     my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
+                     my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
+                     my_segment['filename']))
         f.close()
 
 def _flatten_dict_list(data, exceptions=None):
@@ -616,10 +427,11 @@
     for datum in data:
         for field in [field for field in datum.keys()
                       if field not in exceptions]:
-            if field in new_data:
-                new_data[field] = np.concatenate([new_data[field], datum[field]])
-            else:
-                new_data[field] = np.copy(datum[field])
+            if field not in new_data:
+                new_data[field] = []
+            new_data[field].extend(datum[field])
+    for field in new_data:
+        new_data[field] = YTArray(new_data[field])
     return new_data
 
 def vector_length(start, end):
@@ -666,8 +478,8 @@
     close[no_bound] = np.min
 
     segments = []
-    this_start = np.copy(start)
-    this_end = np.copy(end)
+    this_start = start.copy()
+    this_end = end.copy()
     t = 0.0
     tolerance = 1e-6
 
@@ -681,14 +493,15 @@
             this_start[hit_right] -= dim[hit_right]
             this_end[hit_right] -= dim[hit_right]
 
-        nearest = np.array([close[q]([this_end[q], wall[q]]) \
-                                for q in range(start.size)])
+        nearest = vector.unit_array * \
+          np.array([close[q]([this_end[q], wall[q]]) \
+                    for q in range(start.size)])
         dt = ((nearest - this_start) / vector)[bound].min()
         now = this_start + vector * dt
-        close_enough = np.abs(now - nearest) < 1e-10
+        close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
         now[close_enough] = nearest[close_enough]
         segments.append([np.copy(this_start), np.copy(now)])
-        this_start = np.copy(now)
+        this_start = now.copy()
         t += dt
 
     return segments

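Since _write_light_ray now stores a "units" attribute alongside each
field, a saved ray can be reconstructed with its units intact.  A
sketch of reading one back (the file name and field name here are
assumptions; the "units" attribute is what this changeset adds):

import h5py
from yt.units.yt_array import YTArray

f = h5py.File("my_ray.h5", "r")
# Rebuild unit-aware arrays from the stored data and units strings.
ray = dict((field, YTArray(f[field][:], f[field].attrs["units"]))
           for field in f)
f.close()
print(ray["dl"].in_units("kpc"))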

https://bitbucket.org/yt_analysis/yt/commits/63ac8c15eed1/
Changeset:   63ac8c15eed1
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-06-25 15:10:51
Summary:     Adding a test to ensure ray values are correctly ordered for AMR data.
Affected #:  1 file

diff -r c7769ba6618ff0bbc7c89cb42c1027e8e5ea80b5 -r 63ac8c15eed13b097c8c1e24dfcd1228e101f2fd yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -42,3 +42,8 @@
             yield assert_rel_equal, my_ray['density'][ray_cells].sum(), \
                                     my_all['density'][my_cells].sum(), 14
             yield assert_rel_equal, my_ray['dts'].sum(), unitary, 14
+
+def test_amr_ray():
+    pf = fake_amr_pf()
+    ray = pf.ray(pf.domain_left_edge, pf.domain_right_edge)
+    yield assert_equal, (np.diff(ray["t"]) >= 0).all(), True


https://bitbucket.org/yt_analysis/yt/commits/565f6387bbc7/
Changeset:   565f6387bbc7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-25 21:05:18
Summary:     Sort ray objects.
Affected #:  2 files

diff -r 63ac8c15eed13b097c8c1e24dfcd1228e101f2fd -r 565f6387bbc75ccaf9b30dcdd1447e2c470765fb yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -67,6 +67,7 @@
     _key_fields = ['x','y','z','dx','dy','dz']
     _type_name = "ortho_ray"
     _con_args = ('axis', 'coords')
+    _sort_ind = None
     def __init__(self, axis, coords, pf=None, field_parameters=None):
         super(YTOrthoRayBase, self).__init__(pf, field_parameters)
         self.axis = axis
@@ -78,7 +79,14 @@
         self.px_dx = 'd%s'%('xyz'[self.px_ax])
         self.py_dx = 'd%s'%('xyz'[self.py_ax])
         self.px, self.py = coords
-        self.sort_by = 'xyz'[self.axis]
+        self._sort_by = 'xyz'[self.axis]
+
+    def __getitem__(self, key):
+        if self._sort_ind is None:
+            sv = super(YTOrthoRayBase, self).__getitem__(self._sort_by)
+            self._sort_ind = np.argsort(sv)
+        v = super(YTOrthoRayBase, self).__getitem__(key)
+        return v[self._sort_ind]
 
     @property
     def coords(self):
@@ -116,6 +124,8 @@
     _type_name = "ray"
     _con_args = ('start_point', 'end_point')
     _container_fields = ("t", "dts")
+    _sort_by = "t"
+    _sort_ind = None
     def __init__(self, start_point, end_point, pf=None, field_parameters=None):
         super(YTRayBase, self).__init__(pf, field_parameters)
         self.start_point = self.pf.arr(start_point,
@@ -138,6 +148,13 @@
         else:
             raise KeyError(field)
 
+    def __getitem__(self, key):
+        if self._sort_ind is None:
+            sv = super(YTRayBase, self).__getitem__(self._sort_by)
+            self._sort_ind = np.argsort(sv)
+        v = super(YTRayBase, self).__getitem__(key)
+        return v[self._sort_ind]
+
 class YTSliceBase(YTSelectionContainer2D):
     """
     This is a data object corresponding to a slice through the simulation

diff -r 63ac8c15eed13b097c8c1e24dfcd1228e101f2fd -r 565f6387bbc75ccaf9b30dcdd1447e2c470765fb yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -45,5 +45,8 @@
 
 def test_amr_ray():
     pf = fake_amr_pf()
+    s = pf.slice(0, 0.5)
     ray = pf.ray(pf.domain_left_edge, pf.domain_right_edge)
     yield assert_equal, (np.diff(ray["t"]) >= 0).all(), True
+    ray = pf.ortho_ray(0, (0.5, 0.5))
+    yield assert_equal, (np.diff(ray["x"]) >= 0).all(), True

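The __getitem__ overrides above compute the sort permutation once and
then apply it to every field, so all fields come back consistently
ordered along the ray.  The pattern in isolation (a sketch, not yt
API):

import numpy as np

class SortedFieldContainer(object):
    # Lazily cache an argsort of the sort key ("t" for rays, the
    # along-axis coordinate for ortho rays) and reuse it for all
    # field accesses.
    _sort_by = "t"
    _sort_ind = None

    def __init__(self, data):
        self._data = data  # dict of field name -> ndarray

    def __getitem__(self, key):
        if self._sort_ind is None:
            self._sort_ind = np.argsort(self._data[self._sort_by])
        return self._data[key][self._sort_ind]

fields = {"t": np.array([0.7, 0.1, 0.4]),
          "density": np.array([3.0, 1.0, 2.0])}
ray = SortedFieldContainer(fields)
print(ray["density"])  # -> [ 1.  2.  3.], ordered along the ray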

https://bitbucket.org/yt_analysis/yt/commits/8381e775da18/
Changeset:   8381e775da18
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-06-26 19:07:44
Summary:     Cleaning up test.
Affected #:  1 file

diff -r 565f6387bbc75ccaf9b30dcdd1447e2c470765fb -r 8381e775da180d6db599652ddf25c27ffce367b0 yt/data_objects/tests/test_rays.py
--- a/yt/data_objects/tests/test_rays.py
+++ b/yt/data_objects/tests/test_rays.py
@@ -45,7 +45,6 @@
 
 def test_amr_ray():
     pf = fake_amr_pf()
-    s = pf.slice(0, 0.5)
     ray = pf.ray(pf.domain_left_edge, pf.domain_right_edge)
     yield assert_equal, (np.diff(ray["t"]) >= 0).all(), True
     ray = pf.ortho_ray(0, (0.5, 0.5))


https://bitbucket.org/yt_analysis/yt/commits/89defb5f61f2/
Changeset:   89defb5f61f2
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-05 17:57:04
Summary:     Adding a manual sort of ray field data, since it no longer arrives sorted.
Affected #:  1 file

diff -r c7769ba6618ff0bbc7c89cb42c1027e8e5ea80b5 -r 89defb5f61f22df79629f7d7689e07bb63ca4360 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -334,11 +334,12 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = pf.ray(sub_segment[0], sub_segment[1])
-                sub_data['dl'].extend(sub_ray['dts'] *
+                asort = np.argsort(sub_ray["t"])
+                sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,
                                                     sub_ray.end_point))
                 for field in data_fields:
-                    sub_data[field].extend(sub_ray[field])
+                    sub_data[field].extend(sub_ray[field][asort])
 
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
@@ -347,13 +348,14 @@
                                       sub_ray['y-velocity'],
                                       sub_ray['z-velocity']])
                     sub_data['los_velocity'].extend((np.rollaxis(sub_vel, 1) *
-                                                     line_of_sight).sum(axis=1))
+                                                     line_of_sight).sum(axis=1)[asort])
                     del sub_vel
 
                 sub_ray.clear_data()
-                del sub_ray
+                del sub_ray, asort
 
             for key in sub_data:
+                if key in "xyz": continue
                 sub_data[key] = pf.arr(sub_data[key]).in_cgs()
 
             # Get redshift for each lixel.  Assume linear relation between l and z.


https://bitbucket.org/yt_analysis/yt/commits/988760556bfe/
Changeset:   988760556bfe
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-05 17:59:30
Summary:     Removing commented-out code.
Affected #:  1 file

diff -r 89defb5f61f22df79629f7d7689e07bb63ca4360 -r 988760556bfe1a2c069f1de669af1b18d99b431f yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -123,7 +123,6 @@
         self.end_point = self.pf.arr(end_point,
                             'code_length', dtype='float64')
         self.vec = self.end_point - self.start_point
-        #self.vec /= np.sqrt(np.dot(self.vec, self.vec))
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)
         self._dts, self._ts = None, None


https://bitbucket.org/yt_analysis/yt/commits/8ae6334f4e90/
Changeset:   8ae6334f4e90
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-05 18:05:48
Summary:     Updating lightray recipe.
Affected #:  1 file

diff -r 988760556bfe1a2c069f1de669af1b18d99b431f -r 8ae6334f4e90dc3bb81052acd9b693d1ae834d6b doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ b/doc/source/cookbook/make_light_ray.py
@@ -1,10 +1,6 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import os
 import sys
 import yt
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
 from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
      LightRay
 
@@ -19,51 +15,11 @@
               use_minimum_datasets=True,
               time_data=False)
 
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir' : 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
 # Make a light ray, and set njobs to -1 to use one core
 # per dataset.
 lr.make_light_ray(seed=123456789,
                   solution_filename='LR/lightraysolution.txt',
                   data_filename='LR/lightray.h5',
                   fields=['temperature', 'density'],
-                  get_nearest_halo=True,
-                  nearest_halo_fields=['TotalMassMsun_100',
-                                       'RadiusMpc_100'],
-                  halo_profiler_parameters=halo_profiler_parameters,
                   get_los_velocity=True,
                   njobs=-1)


https://bitbucket.org/yt_analysis/yt/commits/cd36fb4b57c5/
Changeset:   cd36fb4b57c5
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-05 18:10:14
Summary:     Updating recipe listing.
Affected #:  3 files

diff -r 8ae6334f4e90dc3bb81052acd9b693d1ae834d6b -r cd36fb4b57c5bf82b3ae2f87d2f6bbc1a3b22b01 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -51,13 +51,13 @@
 
 .. yt_cookbook:: unique_light_cone_projections.py 
 
-Making Light Rays
-~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a synthetic quasar sight line and 
-uses the halo profiler to record information about halos close to the 
-line of sight.
+Light Rays
+~~~~~~~~~~
+This script demonstrates how to make a synthetic quasar sight line that 
+extends over multiple datasets and can be used to generate a synthetic 
+absorption spectrum.
 
-.. yt_cookbook:: make_light_ray.py 
+.. yt_cookbook:: light_ray.py 
 
 Creating and Fitting Absorption Spectra
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 8ae6334f4e90dc3bb81052acd9b693d1ae834d6b -r cd36fb4b57c5bf82b3ae2f87d2f6bbc1a3b22b01 doc/source/cookbook/light_ray.py
--- /dev/null
+++ b/doc/source/cookbook/light_ray.py
@@ -0,0 +1,25 @@
+import os
+import sys
+import yt
+from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
+     LightRay
+
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
+     
+# Create a LightRay object extending from z = 0 to z = 0.1
+# and use only the redshift dumps.
+lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
+              'Enzo', 0.0, 0.1,
+              use_minimum_datasets=True,
+              time_data=False)
+
+# Make a light ray, and set njobs to -1 to use one core
+# per dataset.
+lr.make_light_ray(seed=123456789,
+                  solution_filename='LR/lightraysolution.txt',
+                  data_filename='LR/lightray.h5',
+                  fields=['temperature', 'density'],
+                  get_los_velocity=True,
+                  njobs=-1)

diff -r 8ae6334f4e90dc3bb81052acd9b693d1ae834d6b -r cd36fb4b57c5bf82b3ae2f87d2f6bbc1a3b22b01 doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import sys
-import yt
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     LightRay
-
-# Create a directory for the light rays
-if not os.path.isdir("LR"): 
-    os.mkdir('LR')
-     
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
-              'Enzo', 0.0, 0.1,
-              use_minimum_datasets=True,
-              time_data=False)
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456789,
-                  solution_filename='LR/lightraysolution.txt',
-                  data_filename='LR/lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_los_velocity=True,
-                  njobs=-1)


https://bitbucket.org/yt_analysis/yt/commits/aa0f1878ea80/
Changeset:   aa0f1878ea80
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-05 18:11:35
Summary:     No-op merge with the earlier attempt to sort ray field data.
Affected #:  2 files



https://bitbucket.org/yt_analysis/yt/commits/d2d2ea8f7610/
Changeset:   d2d2ea8f7610
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-08 21:01:44
Summary:     Correcting some units in cosmology calculator.
Affected #:  1 file

diff -r aa0f1878ea809c609fa34b129bd4088a4b6d1417 -r d2d2ea8f7610f4a6997695125c965a15f11f8333 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -99,7 +99,7 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.comoving_radial_distance(0., 1.).in_units("Mpc")
+        >>> print co.comoving_radial_distance(0., 1.).in_units("Mpccm")
         
         """
         return (self.hubble_distance() *
@@ -122,7 +122,7 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.comoving_transverse_distance(0., 1.).in_units("Mpc")
+        >>> print co.comoving_transverse_distance(0., 1.).in_units("Mpccm")
         
         """
         if (self.omega_curvature > 0):
@@ -156,7 +156,7 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.comoving_volume(0., 1.).in_units("Gpc**3")
+        >>> print co.comoving_volume(0., 1.).in_units("Gpccm**3")
 
         """
         if (self.omega_curvature > 0):
@@ -191,7 +191,7 @@
     def angular_diameter_distance(self, z_i, z_f):
         r"""
         The proper transverse distance between two points at redshift z_f 
-        observed at redshift z_i to have an angular separation of one radian.
+        observed at redshift z_i per unit of angular separation.
 
         Parameters
         ----------
@@ -204,12 +204,13 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.angular_diameter_distance(0., 1.).in_units("Mpc")
+        >>> print co.angular_diameter_distance(0., 1.).in_units("Mpc/deg")
         
         """
         
         return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs()
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs() / \
+                self.quan(1., "radian")
 
     def luminosity_distance(self, z_i, z_f):
         r"""

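With the 1/radian factor folded in, the angular diameter distance now
converts directly to the scales observers quote.  A sketch, assuming
the default cosmological parameters:

from yt.utilities.cosmology import Cosmology

co = Cosmology()
# Proper transverse size per unit angular separation between z=0 and
# z=1; "Mpc/deg" matches the docstring example in the diff above.
d_a = co.angular_diameter_distance(0., 1.)
print(d_a.in_units("Mpc/deg"))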

https://bitbucket.org/yt_analysis/yt/commits/4c949dcc287a/
Changeset:   4c949dcc287a
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 16:05:45
Summary:     Almost finished porting basic LightCone to 3.0.  Still need to fix tiled images.
Affected #:  2 files

diff -r d2d2ea8f7610f4a6997695125c965a15f11f8333 -r 4c949dcc287a4497ca658baa305e04c3363c6402 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -18,24 +18,29 @@
 import numpy as np
 import os
 
-from yt.funcs import *
+from yt.funcs import \
+    mylog, \
+    only_on_root
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
-     CosmologySplice
+    CosmologySplice
 from yt.convenience import \
-     load
+    load
 from yt.utilities.cosmology import \
-     Cosmology
+    Cosmology
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
 from yt.visualization.image_writer import \
-     write_image
+    write_image
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from .common_n_volume import \
-     common_volume
+    common_volume
 from .halo_mask import \
     _light_cone_halo_mask
 from .light_cone_projection import \
-     _light_cone_projection
+    _light_cone_projection
 
 class LightCone(CosmologySplice):
     """
@@ -50,12 +55,6 @@
     observer_redshift : float
         The redshift of the observer.
         Default: 0.0.
-    field_of_view_in_arcminutes : float
-        The field of view of the image in units of arcminutes.
-        Default: 600.0.
-    image_resolution_in_arcseconds : float
-        The size of each image pixel in units of arcseconds.
-        Default: 60.0.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the initial
         and final redshift.  If false, the light cone solution will contain
@@ -99,8 +98,6 @@
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  observer_redshift=0.0,
-                 field_of_view_in_arcminutes=600.0,
-                 image_resolution_in_arcseconds=60.0,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
@@ -110,8 +107,6 @@
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.observer_redshift = observer_redshift
-        self.field_of_view_in_arcminutes = field_of_view_in_arcminutes
-        self.image_resolution_in_arcseconds = image_resolution_in_arcseconds
         self.use_minimum_datasets = use_minimum_datasets
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
@@ -123,8 +118,6 @@
         self.output_prefix = output_prefix
 
         self.master_solution = [] # kept to compare with recycled solutions
-        self.projection_stack = []
-        self.projection_weight_field_stack = []
         self.halo_mask = []
 
         # Original random seed of the first solution.
@@ -134,10 +127,6 @@
         self.recycle_solution = False
         self.recycle_random_seed = 0
 
-        # Calculate number of pixels.
-        self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
-                          self.image_resolution_in_arcseconds)
-
         # Create output directory.
         if not os.path.exists(self.output_dir):
             only_on_root(os.mkdir, self.output_dir)
@@ -173,7 +162,8 @@
         # Don't use box coherence with maximum projection depths.
         if self.use_minimum_datasets and \
                 self.minimum_coherent_box_fraction > 0:
-            mylog.info("Setting minimum_coherent_box_fraction to 0 with minimal light cone.")
+            mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
+                       "minimal light cone.")
             self.minimum_coherent_box_fraction = 0
 
         # Make sure recycling flag is off.
@@ -197,21 +187,20 @@
                 del self.light_cone_solution[q]['previous']
             if self.light_cone_solution[q].has_key('next'):
                 del self.light_cone_solution[q]['next']
-            if (q == len(self.light_cone_solution) - 1):
+            if q == len(self.light_cone_solution) - 1:
                 z_next = self.near_redshift
             else:
                 z_next = self.light_cone_solution[q+1]['redshift']
 
             # Calculate fraction of box required for a depth of delta z
             self.light_cone_solution[q]['box_depth_fraction'] = \
-                self.cosmology.ComovingRadialDistance(z_next, \
-                        self.light_cone_solution[q]['redshift']) * \
-                        self.simulation.hubble_constant / \
-                        self.simulation.box_size
+                (self.cosmology.comoving_radial_distance(z_next, \
+                        self.light_cone_solution[q]['redshift']) / \
+                        self.simulation.box_size).in_units("")
 
             # Simple error check to make sure more than 100% of box depth
             # is never required.
-            if (self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
+            if self.light_cone_solution[q]['box_depth_fraction'] > 1.0:
                 mylog.debug("Warning: box fraction required to go from z = %f to %f is %f" %
                             (self.light_cone_solution[q]['redshift'], z_next,
                              self.light_cone_solution[q]['box_depth_fraction']))
@@ -219,16 +208,6 @@
                             (self.light_cone_solution[q]['deltazMax'],
                              self.light_cone_solution[q]['redshift']-z_next))
 
-            # Calculate fraction of box required for width corresponding to
-            # requested image size.
-            scale = self.cosmology.AngularScale_1arcsec_kpc(self.observer_redshift,
-                self.light_cone_solution[q]['redshift'])
-            size = self.field_of_view_in_arcminutes * 60.0 * scale / 1000.0
-            boxSizeProper = self.simulation.box_size / \
-              (self.simulation.hubble_constant *
-               (1.0 + self.light_cone_solution[q]['redshift']))
-            self.light_cone_solution[q]['box_width_fraction'] = size / boxSizeProper
-
             # Get projection axis and center.
             # If using box coherence, only get random axis and center if enough
             # of the box has been used, or if box_fraction_used will be greater
@@ -241,7 +220,7 @@
                 self.light_cone_solution[q]['projection_axis'] = \
                   np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [np.random.random() for i in range(3)]
+                  np.random.random(3)
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -249,7 +228,7 @@
                 self.light_cone_solution[q]['projection_axis'] = \
                   self.light_cone_solution[q-1]['projection_axis']
                 self.light_cone_solution[q]['projection_center'] = \
-                  copy.deepcopy(self.light_cone_solution[q-1]['projection_center'])
+                  self.light_cone_solution[q-1]['projection_center'].copy()
                 self.light_cone_solution[q]['projection_center']\
                   [self.light_cone_solution[q]['projection_axis']] += \
                     0.5 * (self.light_cone_solution[q]['box_depth_fraction'] +
@@ -281,7 +260,7 @@
         mask_file : string
             An hdf5 file to output the halo mask.
             Default: None
-        cub_file : string
+        cube_file : string
             An hdf5 file to output a halo mask for each slice
             of the light cone.
             Default: None
@@ -337,8 +316,9 @@
                 self.halo_mask *= mask
             del halo_mask_cube
 
-    def project_light_cone(self, field, weight_field=None, apply_halo_mask=False,
-                           node=None, save_stack=True, save_final_image=True,
+    def project_light_cone(self, field_of_view, image_resolution, field,
+                           weight_field=None, apply_halo_mask=False,
+                           save_stack=True, save_final_image=True,
                            save_slice_images=False,
                            cmap_name='algae', photon_field=False,
                            njobs=1, dynamic=False):
@@ -346,6 +326,10 @@
 
         Parameters
         ----------
+        field_of_view : YTQuantity or tuple of (float, str)
+            The field of view of the image and the units.
+        image_resolution : YTQuantity or tuple of (float, str)
+            The size of each image pixel and the units.
         field : string
             The projected field.
         weight_field : string
@@ -356,10 +340,6 @@
             if True, a boolean mask is applied to the light cone projection.  See
             below for a description of halo masks.
             Default: False.
-        node : string
-            a prefix to be prepended to the node name under which the
-            projection data is serialized.
-            Default: None.
         save_stack : bool
             if True, the light cone data including each individual
             slice is written to an hdf5 file.
@@ -390,15 +370,28 @@
 
         """
 
+        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
+            field_of_view = self.simulation.quan(field_of_view[0],
+                                                 field_of_view[1])
+        elif not isinstance(field_of_view, YTArray):
+          raise RuntimeError("field_of_view argument must be either a YTQauntity " +
+                             "or a tuple of type (float, str).")
+        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
+            image_resolution = self.simulation.quan(image_resolution[0],
+                                                    image_resolution[1])
+        elif not isinstance(image_resolution, YTArray):
+          raise RuntimeError("image_resolution argument must be either a YTQauntity " +
+                             "or a tuple of type (float, str).")
+        
+        # Calculate number of pixels on a side.
+        pixels = (field_of_view / image_resolution).in_units("")
+
         # Clear projection stack.
-        self.projection_stack = []
-        self.projection_weight_field_stack = []
-        if (self.light_cone_solution[-1].has_key('object')):
+        projection_stack = []
+        projection_weight_stack = []
+        if self.light_cone_solution[-1].has_key('object'):
             del self.light_cone_solution[-1]['object']
 
-        if not(self.output_dir.endswith("/")):
-                 self.output_dir += "/"
-
         # for q, output in enumerate(self.light_cone_solution):
         all_storage = {}
         for my_storage, output in parallel_objects(self.light_cone_solution,
@@ -406,19 +399,29 @@
                                                    dynamic=dynamic):
             output['object'] = load(output['filename'])
             output['object'].parameters.update(self.set_parameters)
-            frb = _light_cone_projection(output, field, self.pixels,
-                                         weight_field=weight_field, node=node)
+
+            # Calculate fraction of box required for width corresponding to
+            # requested image size.
+            proper_box_size = self.simulation.box_size / \
+              (1.0 + output['redshift'])
+            output['box_width_fraction'] = \
+              (self.cosmology.angular_diameter_distance(self.observer_redshift,
+               output['redshift']) * field_of_view /
+               proper_box_size).in_units("")
+            
+            frb = _light_cone_projection(output, field, pixels,
+                                         weight_field=weight_field)
 
             if photon_field:
                 # Decrement the flux by the luminosity distance.
                 # Assume field in frb is in erg/s/cm^2/Hz
-                dL = self.cosmology.LuminosityDistance(self.observer_redshift,
-                                                       output['redshift']) #in Mpc
-                boxSizeProper = self.simulation.box_size / \
-                  (self.simulation.hubble_constant * (1.0 + output['redshift']))
-                pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*np.pi*dL**2)
-                mylog.info("Distance to slice = %e" % dL)
+                dL = self.cosmology.luminosity_distance(self.observer_redshift,
+                                                        output['redshift'])
+                proper_box_size = self.simulation.box_size / \
+                  (1.0 + output['redshift'])
+                pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
+                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
+                mylog.info("Distance to slice = %s" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
             if weight_field is None:
@@ -435,16 +438,10 @@
         all_slices.sort()
         for my_slice in all_slices:
             if save_slice_images:
-                if node is None:
-                    name = "%s%s_%04d_%04d" % (self.output_dir,
-                                               self.output_prefix,
-                                               my_slice,
-                                               len(self.light_cone_solution))
-                else:
-                    name = "%s%s_%s_%04d_%04d" % (self.output_dir,
-                                                  self.output_prefix,
-                                                  node, my_slice,
-                                                  len(self.light_cone_solution))
+                name = os.path.join(self.output_dir,
+                                    "%s_%04d_%04d" %
+                                    (self.output_prefix,
+                                     my_slice, len(self.light_cone_solution)))
                 if weight_field is None:
                     my_image = all_storage[my_slice]['field']
                 else:
@@ -453,21 +450,22 @@
                 only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
-            self.projection_stack.append(all_storage[my_slice]['field'])
+            projection_stack.append(all_storage[my_slice]['field'])
             if weight_field is not None:
-                self.projection_weight_field_stack.append(all_storage[my_slice]['field'])
+                projection_weight_stack.append(all_storage[my_slice]['weight_field'])
 
+        projection_stack = self.simulation.arr(projection_stack)
+        projection_weight_stack = self.simulation.arr(projection_weight_stack)
+                
         # Add up slices to make light cone projection.
         if (weight_field is None):
-            light_cone_projection = sum(self.projection_stack)
+            light_cone_projection = projection_stack.sum(axis=0)
         else:
-            light_cone_projection = sum(self.projection_stack) / \
-              sum(self.projection_weight_field_stack)
+            light_cone_projection = \
+              projection_stack.sum(axis=0) / \
+              projection_weight_stack.sum(axis=0)
 
-        if node is None:
-            filename = "%s%s" % (self.output_dir, self.output_prefix)
-        else:
-            filename = "%s%s_%s" % (self.output_dir, self.output_prefix, node)
+        filename = os.path.join(self.output_dir, self.output_prefix)
 
         # Apply halo mask.
         if apply_halo_mask:
@@ -485,8 +483,11 @@
 
         # Write stack to hdf5 file.
         if save_stack:
-            self._save_light_cone_stack(field=field, weight_field=weight_field,
-                                        filename=filename)
+            self._save_light_cone_stack(field, weight_field,
+                projection_stack, projection_weight_stack,
+                filename=filename,
+                attrs={"field_of_view": str(field_of_view),
+                       "image_resolution": str(image_resolution)})
 
     def rerandomize_light_cone_solution(self, new_seed, recycle=True, filename=None):
         """
@@ -675,84 +676,57 @@
         f.close()
 
     @parallel_root_only
-    def _save_light_cone_stack(self, field=None, weight_field=None,
-                               filename=None, over_write=True):
+    def _save_light_cone_stack(self, field, weight_field,
+                               pstack, wstack,
+                               filename=None, attrs=None):
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
+        if attrs is None:
+            attrs = {}
+        
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = np.array([my_slice['redshift'] \
+        redshift_list = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
         weight_field_node = "weight_field_%s" % weight_field
 
         if (filename is None):
-            filename = "%s/%s_data" % (self.output_dir, self.output_prefix)
+            filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix)
         if not(filename.endswith('.h5')):
                filename += ".h5"
 
-        if (len(self.projection_stack) == 0):
-            mylog.debug("save_light_cone_stack: no projection data loaded.")
+        if pstack.size == 0:
+            mylog.info("save_light_cone_stack: light cone projection is empty.")
             return
 
         mylog.info("Writing light cone data to %s." % filename)
 
-        output = h5py.File(filename, "a")
+        fh = h5py.File(filename, "a")
 
-        node_exists = field_node in output
+        if field_node in fh:
+            del fh[field_node]
 
-        if node_exists:
-            if over_write:
-                mylog.info("Dataset, %s, already exists, overwriting." %
-                           field_node)
-                write_data = True
-                del output[field_node]
-            else:
-                mylog.info("Dataset, %s, already exists in %s, not saving." %
-                           (field_node, filename))
-                write_data = False
-        else:
-            write_data = True
+        mylog.info("Saving %s to %s." % (field_node, filename))
+        dataset = fh.create_dataset(field_node,
+                                    data=pstack)
+        dataset.attrs["units"] = str(pstack.units)
+        dataset.attrs['redshifts'] = redshift_list
+        dataset.attrs['observer_redshift'] = np.float(self.observer_redshift)
+        for key, value in attrs.items():
+            dataset.attrs[key] = value
 
-        if write_data:
-            mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = np.array(self.projection_stack)
-            field_dataset = output.create_dataset(field_node,
-                                                  data=self.projection_stack)
-            field_dataset.attrs['redshifts'] = redshiftList
-            field_dataset.attrs['observer_redshift'] = \
-              np.float(self.observer_redshift)
-            field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              np.float(self.field_of_view_in_arcminutes)
-            field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              np.float(self.image_resolution_in_arcseconds)
+        if wstack.size > 0:
+            if weight_field_node in fh:
+                del fh[weight_field_node]
 
-        if (len(self.projection_weight_field_stack) > 0):
-            if node_exists:
-                if over_write:
-                    mylog.info("Dataset, %s, already exists, overwriting." %
-                               weight_field_node)
-                    del output[field_node]
-                else:
-                    mylog.info("Dataset, %s, already exists in %s, not saving." %
-                               (weight_field_node, filename))
-                    write_data = False
-            else:
-                write_data = True
+            mylog.info("Saving %s to %s." % (weight_field_node, filename))
+            dataset = fh.create_dataset(weight_field_node,
+                                        data=wstack)
+            dataset.attrs["units"] = str(wstack.units)
+            dataset.attrs['redshifts'] = redshift_list
+            dataset.attrs['observer_redshift'] = np.float(self.observer_redshift)
+            for key, value in attrs.items():
+                dataset.attrs[key] = value
 
-            if write_data:
-                mylog.info("Saving %s to %s." % (weight_field_node, filename))
-                self.projection_weight_field_stack = \
-                  np.array(self.projection_weight_field_stack)
-                weight_field_dataset = \
-                  output.create_dataset(weight_field_node,
-                                        data=self.projection_weight_field_stack)
-                weight_field_dataset.attrs['redshifts'] = redshiftList
-                weight_field_dataset.attrs['observer_redshift'] = \
-                  np.float(self.observer_redshift)
-                weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  np.float(self.field_of_view_in_arcminutes)
-                weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  np.float(self.image_resolution_in_arcseconds)
-
-        output.close()
+        fh.close()

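The port above replaces the old field_of_view_in_arcminutes and
image_resolution_in_arcseconds keywords with unit-aware arguments to
project_light_cone. A minimal doctest-style sketch of the new call,
assuming the enzo_tiny_cosmology sample dataset and a
calculate_light_cone_solution(seed=...) entry point, neither of which
appears in this diff:

>>> lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
...                near_redshift=0.0, far_redshift=0.1)
>>> lc.calculate_light_cone_solution(seed=123456789)
>>> # field_of_view and image_resolution may be YTQuantities or
>>> # (float, str) tuples; the pixel count is their dimensionless ratio.
>>> lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), "density",
...                       weight_field=None, save_stack=True,
...                       save_final_image=True)
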
diff -r d2d2ea8f7610f4a6997695125c965a15f11f8333 -r 4c949dcc287a4497ca658baa305e04c3363c6402 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -14,91 +14,104 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import copy
 
-from yt.funcs import *
+from yt.funcs import \
+    mylog
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
 
 @parallel_blocking_call
-def _light_cone_projection(lightConeSlice, field, pixels, weight_field=None,
-                           save_image=False, node=None, field_cuts=None):
+def _light_cone_projection(my_slice, field, pixels, weight_field=None,
+                           save_image=False, field_cuts=None):
     "Create a single projection to be added into the light cone stack."
 
-    # Use some projection parameters to seed random number generator to make unique node name.
-    # We are just saving the projection object, so only the projection axis needs to be considered
-    # since the lateral shifting and tiling occurs after the projection object is made.
+    # We are just saving the projection object, so only the projection axis 
+    # needs to be considered since the lateral shifting and tiling occurs after 
+    # the projection object is made.
     # Likewise, only the box_depth_fraction needs to be considered.
 
-    # projection_axis
-    # projection_center[projection_axis]
-    # box_depth_fraction
+    mylog.info("Making projection at z = %f from %s." % \
+               (my_slice["redshift"], my_slice["filename"]))
 
-    # Name node with user specified keyword if given with 'node' keyword.
-    node_name = "LightCone_%s_%d_%f_%f" % (node, lightConeSlice['projection_axis'],
-                                           lightConeSlice['projection_center'][lightConeSlice['projection_axis']],
-                                           lightConeSlice['box_depth_fraction'])
-
-    mylog.info("Making projection at z = %f from %s." % (lightConeSlice['redshift'], lightConeSlice['filename']))
-
-    region_center = [0.5 * (lightConeSlice['object'].domain_right_edge[q] +
-                            lightConeSlice['object'].domain_left_edge[q]) \
-                         for q in range(lightConeSlice['object'].dimensionality)]
+    region_center = [0.5 * (my_slice["object"].domain_right_edge[q] +
+                            my_slice["object"].domain_left_edge[q]) \
+                         for q in range(my_slice["object"].dimensionality)]
 
     # 1. The Depth Problem
     # Use coordinate field cut in line of sight to cut projection to proper depth.
     if field_cuts is None:
         these_field_cuts = []
     else:
-        these_field_cuts = copy.deepcopy(field_cuts)
+        these_field_cuts = field_cuts.copy()
 
-    if (lightConeSlice['box_depth_fraction'] < 1):
-        axis = ('x', 'y', 'z')[lightConeSlice['projection_axis']]
-        depthLeft = lightConeSlice['projection_center'][lightConeSlice['projection_axis']] \
-            - 0.5 * lightConeSlice['box_depth_fraction']
-        depthRight = lightConeSlice['projection_center'][lightConeSlice['projection_axis']] \
-            + 0.5 * lightConeSlice['box_depth_fraction']
+    if (my_slice["box_depth_fraction"] < 1):
+        axis = ("x", "y", "z")[my_slice["projection_axis"]]
+        depthLeft = \
+          my_slice["projection_center"][my_slice["projection_axis"]] \
+            - 0.5 * my_slice["box_depth_fraction"]
+        depthRight = \
+          my_slice["projection_center"][my_slice["projection_axis"]] \
+            + 0.5 * my_slice["box_depth_fraction"]
         if (depthLeft < 0):
-            cut_mask = "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & (grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | ((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & (grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))" % \
-                (axis, axis, axis, axis, depthRight, axis, axis, (depthLeft+1), axis, axis)
+            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
+              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
+              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
+              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+                (axis, axis, axis, axis, depthRight, 
+                 axis, axis, (depthLeft+1), axis, axis)
         elif (depthRight > 1):
-            cut_mask = "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & (grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | ((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & (grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))" % \
-                (axis, axis, axis, axis, (depthRight-1), axis, axis, depthLeft, axis, axis)
+            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
+              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
+              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
+              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+                (axis, axis, axis, axis, (depthRight-1),
+                 axis, axis, depthLeft, axis, axis)
         else:
-            cut_mask = "(grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & (grid[\"%s\"] - 0.5*grid[\"%s\"] <= %f)" % (axis, axis, depthLeft, axis, axis, depthRight)
+            cut_mask = ("(grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
+              "(grid[\"%s\"] - 0.5*grid[\"%s\"] <= %f)") % \
+              (axis, axis, depthLeft, axis, axis, depthRight)
 
         these_field_cuts.append(cut_mask)
 
     # Make projection.
-    proj = lightConeSlice['object'].h.proj(field, lightConeSlice['projection_axis'], 
-                                           weight_field, center=region_center,
-                                           field_cuts=these_field_cuts, node_name=node_name)
+    proj = my_slice["object"].proj(field, my_slice["projection_axis"], 
+        weight_field, center=region_center,
+        field_parameters=dict(field_cuts=these_field_cuts))
+    proj_field = proj.field[0]
 
     # 2. The Tile Problem
     # Tile projection to specified width.
 
     # Original projection data.
-    original_px = copy.deepcopy(proj['px'])
-    original_py = copy.deepcopy(proj['py'])
-    original_pdx = copy.deepcopy(proj['pdx'])
-    original_pdy = copy.deepcopy(proj['pdy'])
-    original_field = copy.deepcopy(proj[field])
-    original_weight_field = copy.deepcopy(proj['weight_field'])
+    original_px = proj.field_data["px"].in_units("code_length").copy()
+    original_py = proj.field_data["py"].in_units("code_length").copy()
+    original_pdx = proj.field_data["pdx"].in_units("code_length").copy()
+    original_pdy = proj.field_data["pdy"].in_units("code_length").copy()
+    original_field = proj.field_data[proj_field].copy()
+    original_weight_field = proj.field_data["weight_field"].copy()
+
+    for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
+        proj.field_data[my_field] = [proj.field_data[my_field]]
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(my_slice["box_width_fraction"]))):
+        x = my_slice["object"].quan(x, "code_length")
+        for y in range(int(np.ceil(my_slice["box_width_fraction"]))):
+            y = my_slice["object"].quan(y, "code_length")
             if ((x + y) > 0):
-                proj['px'] = np.concatenate([proj['px'], original_px+x])
-                proj['py'] = np.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
-                proj[field] = np.concatenate([proj[field], original_field])
-                proj['weight_field'] = np.concatenate([proj['weight_field'],
-                                                       original_weight_field])
+                proj.field_data["px"] += [original_px+x]
+                proj.field_data["py"] += [original_py+y]
+                proj.field_data["pdx"] += [original_pdx]
+                proj.field_data["pdy"] += [original_pdy]
+                proj.field_data["weight_field"] += [original_weight_field]
+                proj.field_data[proj_field] += [original_field]
 
+    for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
+        proj.field_data[my_field] = \
+          my_slice["object"].arr(proj.field_data[my_field]).flatten()
+                
     # Delete originals.
     del original_px
     del original_py
@@ -110,81 +123,95 @@
     # 3. The Shift Problem
     # Shift projection by random x and y offsets.
 
-    offset = copy.deepcopy(lightConeSlice['projection_center'])
-    # Delete depth coordinate.
-    del offset[lightConeSlice['projection_axis']]
+    image_axes = np.roll(np.arange(3), -my_slice["projection_axis"])[1:]
+    image_right_x = my_slice["object"].domain_right_edge[image_axes[0]]
+    image_left_x  = my_slice["object"].domain_left_edge[image_axes[0]]
+    image_right_y = my_slice["object"].domain_right_edge[image_axes[1]]
+    image_left_y  = my_slice["object"].domain_left_edge[image_axes[1]]
+    
+    offset = my_slice["projection_center"].copy() * \
+      my_slice["object"].domain_width
+    offset = np.roll(offset, -my_slice["projection_axis"])[1:]
 
     # Shift x and y positions.
-    proj['px'] -= offset[0]
-    proj['py'] -= offset[1]
+    proj.field_data["px"] -= offset[0]
+    proj.field_data["py"] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj.field_data["px"][proj.field_data["px"] < image_left_x] += \
+      np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    proj.field_data["py"][proj.field_data["py"] < image_left_y] += \
+      np.ceil(my_slice["box_width_fraction"]) * image_right_y
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
-    add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      np.ceil(lightConeSlice['box_width_fraction'])
-    add_x_px = proj['px'][add_x_right]
-    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
-    add_x_py = proj['py'][add_x_right]
-    add_x_pdx = proj['pdx'][add_x_right]
-    add_x_pdy = proj['pdy'][add_x_right]
-    add_x_field = proj[field][add_x_right]
-    add_x_weight_field = proj['weight_field'][add_x_right]
+    add_x_right = proj.field_data["px"] + 0.5 * proj.field_data["pdx"] > \
+      np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    add_x_px = proj.field_data["px"][add_x_right]
+    add_x_px -= np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    add_x_py = proj.field_data["py"][add_x_right]
+    add_x_pdx = proj.field_data["pdx"][add_x_right]
+    add_x_pdy = proj.field_data["pdy"][add_x_right]
+    add_x_field = proj.field_data[proj_field][add_x_right]
+    add_x_weight_field = proj.field_data["weight_field"][add_x_right]
     del add_x_right
 
     # Cells hanging off the left edge.
-    add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
-    add2_x_px = proj['px'][add_x_left]
-    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
-    add2_x_py = proj['py'][add_x_left]
-    add2_x_pdx = proj['pdx'][add_x_left]
-    add2_x_pdy = proj['pdy'][add_x_left]
-    add2_x_field = proj[field][add_x_left]
-    add2_x_weight_field = proj['weight_field'][add_x_left]
+    add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < image_left_x
+    add2_x_px = proj.field_data["px"][add_x_left]
+    add2_x_px += np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    add2_x_py = proj.field_data["py"][add_x_left]
+    add2_x_pdx = proj.field_data["pdx"][add_x_left]
+    add2_x_pdy = proj.field_data["pdy"][add_x_left]
+    add2_x_field = proj.field_data[proj_field][add_x_left]
+    add2_x_weight_field = proj.field_data["weight_field"][add_x_left]
     del add_x_left
 
     # Cells hanging off the top edge.
-    add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      np.ceil(lightConeSlice['box_width_fraction'])
-    add_y_px = proj['px'][add_y_right]
-    add_y_py = proj['py'][add_y_right]
-    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
-    add_y_pdx = proj['pdx'][add_y_right]
-    add_y_pdy = proj['pdy'][add_y_right]
-    add_y_field = proj[field][add_y_right]
-    add_y_weight_field = proj['weight_field'][add_y_right]
+    add_y_right = proj.field_data["py"] + 0.5 * proj.field_data["pdy"] > \
+      np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    add_y_px = proj.field_data["px"][add_y_right]
+    add_y_py = proj.field_data["py"][add_y_right]
+    add_y_py -= np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    add_y_pdx = proj.field_data["pdx"][add_y_right]
+    add_y_pdy = proj.field_data["pdy"][add_y_right]
+    add_y_field = proj.field_data[proj_field][add_y_right]
+    add_y_weight_field = proj.field_data["weight_field"][add_y_right]
     del add_y_right
 
     # Cells hanging off the bottom edge.
-    add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
-    add2_y_px = proj['px'][add_y_left]
-    add2_y_py = proj['py'][add_y_left]
-    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
-    add2_y_pdx = proj['pdx'][add_y_left]
-    add2_y_pdy = proj['pdy'][add_y_left]
-    add2_y_field = proj[field][add_y_left]
-    add2_y_weight_field = proj['weight_field'][add_y_left]
+    add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < image_left_y
+    add2_y_px = proj.field_data["px"][add_y_left]
+    add2_y_py = proj.field_data["py"][add_y_left]
+    add2_y_py += np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    add2_y_pdx = proj.field_data["pdx"][add_y_left]
+    add2_y_pdy = proj.field_data["pdy"][add_y_left]
+    add2_y_field = proj.field_data[proj_field][add_y_left]
+    add2_y_weight_field = proj.field_data["weight_field"][add_y_left]
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
-                                 add2_x_px, add2_y_px])
-    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
-                                 add2_x_py, add2_y_py])
-    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
-                                  add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
-                                  add2_x_pdy, add2_y_pdy])
-    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
-                                  add2_x_field, add2_y_field])
-    proj['weight_field'] = np.concatenate([proj['weight_field'],
-                                           add_x_weight_field, add_y_weight_field,
-                                           add2_x_weight_field, add2_y_weight_field])
+    proj.field_data["px"] = proj.field_data["px"].unit_quantity * \
+      np.concatenate([proj.field_data["px"].d, add_x_px.d, add_y_px.d,
+                      add2_x_px.d, add2_y_px.d])
+    proj.field_data["py"] = proj.field_data["py"].unit_quantity * \
+        np.concatenate([proj.field_data["py"].d, add_x_py.d, add_y_py.d,
+                        add2_x_py.d, add2_y_py.d])
+    proj.field_data["pdx"] = proj.field_data["pdx"].unit_quantity * \
+        np.concatenate([proj.field_data["pdx"].d, add_x_pdx.d, add_y_pdx.d,
+                        add2_x_pdx.d, add2_y_pdx.d])
+    proj.field_data["pdy"] = proj.field_data["pdy"].unit_quantity * \
+        np.concatenate([proj.field_data["pdy"].d, add_x_pdy.d, add_y_pdy.d,
+                        add2_x_pdy.d, add2_y_pdy.d])
+    proj.field_data[proj_field] = proj.field_data["pdy"].unit_quantity * \
+        np.concatenate([proj.field_data[proj_field].d, add_x_field.d, add_y_field.d,
+                        add2_x_field.d, add2_y_field.d])
+    proj.field_data["weight_field"] = proj.field_data["weight_field"].unit_quantity * \
+        np.concatenate([proj.field_data["weight_field"].d,
+                        add_x_weight_field.d, add_y_weight_field.d,
+                        add2_x_weight_field.d, add2_y_weight_field.d])
 
     # Delete original copies of hanging cells.
     del add_x_px, add_y_px, add2_x_px, add2_y_px
@@ -197,29 +224,34 @@
     # Tiles were made rounding up the width to the nearest integer.
     # Cut off the edges to get the specified width.
     # Cut in the x direction.
-    cut_x = proj['px'] - 0.5 * proj['pdx'] < lightConeSlice['box_width_fraction']
-    proj['px'] = proj['px'][cut_x]
-    proj['py'] = proj['py'][cut_x]
-    proj['pdx'] = proj['pdx'][cut_x]
-    proj['pdy'] = proj['pdy'][cut_x]
-    proj[field] = proj[field][cut_x]
-    proj['weight_field'] = proj['weight_field'][cut_x]
+    cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < image_right_x
+    proj.field_data["px"] = proj.field_data["px"][cut_x]
+    proj.field_data["py"] = proj.field_data["py"][cut_x]
+    proj.field_data["pdx"] = proj.field_data["pdx"][cut_x]
+    proj.field_data["pdy"] = proj.field_data["pdy"][cut_x]
+    proj.field_data[proj_field] = proj.field_data[proj_field][cut_x]
+    proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_x]
     del cut_x
 
     # Cut in the y direction.
-    cut_y = proj['py'] - 0.5 * proj['pdy'] < lightConeSlice['box_width_fraction']
-    proj['px'] = proj['px'][cut_y]
-    proj['py'] = proj['py'][cut_y]
-    proj['pdx'] = proj['pdx'][cut_y]
-    proj['pdy'] = proj['pdy'][cut_y]
-    proj[field] = proj[field][cut_y]
-    proj['weight_field'] = proj['weight_field'][cut_y]
+    cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < image_right_y
+    proj.field_data["px"] = proj.field_data["px"][cut_y]
+    proj.field_data["py"] = proj.field_data["py"][cut_y]
+    proj.field_data["pdx"] = proj.field_data["pdx"][cut_y]
+    proj.field_data["pdy"] = proj.field_data["pdy"][cut_y]
+    proj.field_data[proj_field] = proj.field_data[proj_field][cut_y]
+    proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_y]
     del cut_y
 
     # Create fixed resolution buffer to return back to the light cone object.
     # These buffers will be stacked together to make the light cone.
-    frb = FixedResolutionBuffer(proj, (0, lightConeSlice['box_width_fraction'],
-                                       0, lightConeSlice['box_width_fraction']),
+    # frb = FixedResolutionBuffer(proj, (0, my_slice["box_width_fraction"],
+    #                                    0, my_slice["box_width_fraction"]),
+    frb = FixedResolutionBuffer(proj, 
+                                (image_left_x,
+                                 image_right_x * my_slice["box_width_fraction"],
+                                 image_left_y,
+                                 image_right_y * my_slice["box_width_fraction"]),
                                 (pixels, pixels), antialias=False)
 
     return frb

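The tile-and-shift machinery above is periodic-box arithmetic: cell
positions are offset, wrapped back into the domain, and cells that
straddle an edge are duplicated on the far side. A standalone
one-dimensional numpy sketch of the same idea (wrap_periodic is a
hypothetical helper; unit handling omitted):

import numpy as np

def wrap_periodic(px, pdx, width):
    # Wrap cell centers px (cell widths pdx) into [0, width).
    px = np.mod(px, width)
    # Cells hanging off either edge get a copy shifted to the other side.
    off_right = px + 0.5 * pdx > width
    off_left = px - 0.5 * pdx < 0.0
    extra_px = np.concatenate([px[off_right] - width, px[off_left] + width])
    extra_pdx = np.concatenate([pdx[off_right], pdx[off_left]])
    return np.concatenate([px, extra_px]), np.concatenate([pdx, extra_pdx])

px, pdx = wrap_periodic(np.array([0.02, 0.5, 0.98]), np.full(3, 0.1), 1.0)
# px now carries extra entries at -0.02 and 1.02 for the straddling cells.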

https://bitbucket.org/yt_analysis/yt/commits/ce25296b4e18/
Changeset:   ce25296b4e18
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 16:07:00
Summary:     Fixing quotes.
Affected #:  1 file

diff -r 4c949dcc287a4497ca658baa305e04c3363c6402 -r ce25296b4e18552e9a962c1abd35db4085a3f1e1 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -89,10 +89,10 @@
         Default: None.
     output_dir : string
         The directory in which images and data files will be written.
-        Default: 'LC'.
+        Default: "LC".
     output_prefix : string
         The prefix of all images and data files.
-        Default: 'LightCone'.
+        Default: "LightCone".
 
     """
     def __init__(self, parameter_filename, simulation_type,
@@ -102,7 +102,7 @@
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
                  find_outputs=False, set_parameters=None,
-                 output_dir='LC', output_prefix='LightCone'):
+                 output_dir="LC", output_prefix="LightCone"):
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -159,7 +159,7 @@
 
         """
 
-        # Don't use box coherence with maximum projection depths.
+        # Don"t use box coherence with maximum projection depths.
         if self.use_minimum_datasets and \
                 self.minimum_coherent_box_fraction > 0:
             mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
@@ -183,30 +183,30 @@
         box_fraction_used = 0.0
 
         for q in range(len(self.light_cone_solution)):
-            if self.light_cone_solution[q].has_key('previous'):
-                del self.light_cone_solution[q]['previous']
-            if self.light_cone_solution[q].has_key('next'):
-                del self.light_cone_solution[q]['next']
+            if self.light_cone_solution[q].has_key("previous"):
+                del self.light_cone_solution[q]["previous"]
+            if self.light_cone_solution[q].has_key("next"):
+                del self.light_cone_solution[q]["next"]
             if q == len(self.light_cone_solution) - 1:
                 z_next = self.near_redshift
             else:
-                z_next = self.light_cone_solution[q+1]['redshift']
+                z_next = self.light_cone_solution[q+1]["redshift"]
 
             # Calculate fraction of box required for a depth of delta z
-            self.light_cone_solution[q]['box_depth_fraction'] = \
+            self.light_cone_solution[q]["box_depth_fraction"] = \
                 (self.cosmology.comoving_radial_distance(z_next, \
-                        self.light_cone_solution[q]['redshift']) / \
+                        self.light_cone_solution[q]["redshift"]) / \
                         self.simulation.box_size).in_units("")
 
             # Simple error check to make sure more than 100% of box depth
             # is never required.
-            if self.light_cone_solution[q]['box_depth_fraction'] > 1.0:
+            if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
                 mylog.debug("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_cone_solution[q]['redshift'], z_next,
-                             self.light_cone_solution[q]['box_depth_fraction']))
+                            (self.light_cone_solution[q]["redshift"], z_next,
+                             self.light_cone_solution[q]["box_depth_fraction"]))
                 mylog.debug("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_cone_solution[q]['deltazMax'],
-                             self.light_cone_solution[q]['redshift']-z_next))
+                            (self.light_cone_solution[q]["deltazMax"],
+                             self.light_cone_solution[q]["redshift"]-z_next))
 
             # Get projection axis and center.
             # If using box coherence, only get random axis and center if enough
@@ -215,30 +215,30 @@
             if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
               (box_fraction_used > self.minimum_coherent_box_fraction) or \
               (box_fraction_used +
-               self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
+               self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                 # Random axis and center.
-                self.light_cone_solution[q]['projection_axis'] = \
+                self.light_cone_solution[q]["projection_axis"] = \
                   np.random.randint(0, 3)
-                self.light_cone_solution[q]['projection_center'] = \
+                self.light_cone_solution[q]["projection_center"] = \
                   np.random.random(3)
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
                 # but with depth center shifted.
-                self.light_cone_solution[q]['projection_axis'] = \
-                  self.light_cone_solution[q-1]['projection_axis']
-                self.light_cone_solution[q]['projection_center'] = \
-                  self.light_cone_solution[q-1]['projection_center'].copy()
-                self.light_cone_solution[q]['projection_center']\
-                  [self.light_cone_solution[q]['projection_axis']] += \
-                    0.5 * (self.light_cone_solution[q]['box_depth_fraction'] +
-                           self.light_cone_solution[q-1]['box_depth_fraction'])
-                if self.light_cone_solution[q]['projection_center']\
-                  [self.light_cone_solution[q]['projection_axis']] >= 1.0:
-                    self.light_cone_solution[q]['projection_center']\
-                      [self.light_cone_solution[q]['projection_axis']] -= 1.0
+                self.light_cone_solution[q]["projection_axis"] = \
+                  self.light_cone_solution[q-1]["projection_axis"]
+                self.light_cone_solution[q]["projection_center"] = \
+                  self.light_cone_solution[q-1]["projection_center"].copy()
+                self.light_cone_solution[q]["projection_center"]\
+                  [self.light_cone_solution[q]["projection_axis"]] += \
+                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
+                           self.light_cone_solution[q-1]["box_depth_fraction"])
+                if self.light_cone_solution[q]["projection_center"]\
+                  [self.light_cone_solution[q]["projection_axis"]] >= 1.0:
+                    self.light_cone_solution[q]["projection_center"]\
+                      [self.light_cone_solution[q]["projection_axis"]] -= 1.0
 
-            box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
+            box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]
 
         # Store this as the master solution.
         self.master_solution = [copy.deepcopy(q) \
@@ -293,9 +293,9 @@
 
         # Check if file already exists.
         if mask_file is not None and os.path.exists(mask_file):
-            mylog.info('Reading halo mask from %s.' % mask_file)
-            in_file = h5py.File(mask_file, 'r')
-            self.halo_mask = in_file['HaloMask'].value
+            mylog.info("Reading halo mask from %s." % mask_file)
+            in_file = h5py.File(mask_file, "r")
+            self.halo_mask = in_file["HaloMask"].value
             in_file.close()
 
         # Otherwise, make a halo mask.
@@ -320,7 +320,7 @@
                            weight_field=None, apply_halo_mask=False,
                            save_stack=True, save_final_image=True,
                            save_slice_images=False,
-                           cmap_name='algae', photon_field=False,
+                           cmap_name="algae", photon_field=False,
                            njobs=1, dynamic=False):
         r"""Create projections for light cone, then add them together.
 
@@ -352,7 +352,7 @@
             Default: False.
         cmap_name : string
             color map for images.
-            Default: 'algae'.
+            Default: "algae".
         photon_field : bool
             if True, the projection data for each slice is decremented by 4 Pi
             R^2`, where R is the luminosity distance between the observer and
@@ -389,24 +389,24 @@
         # Clear projection stack.
         projection_stack = []
         projection_weight_stack = []
-        if self.light_cone_solution[-1].has_key('object'):
-            del self.light_cone_solution[-1]['object']
+        if self.light_cone_solution[-1].has_key("object"):
+            del self.light_cone_solution[-1]["object"]
 
         # for q, output in enumerate(self.light_cone_solution):
         all_storage = {}
         for my_storage, output in parallel_objects(self.light_cone_solution,
                                                    storage=all_storage,
                                                    dynamic=dynamic):
-            output['object'] = load(output['filename'])
-            output['object'].parameters.update(self.set_parameters)
+            output["object"] = load(output["filename"])
+            output["object"].parameters.update(self.set_parameters)
 
             # Calculate fraction of box required for width corresponding to
             # requested image size.
             proper_box_size = self.simulation.box_size / \
-              (1.0 + output['redshift'])
-            output['box_width_fraction'] = \
+              (1.0 + output["redshift"])
+            output["box_width_fraction"] = \
               (self.cosmology.angular_diameter_distance(self.observer_redshift,
-               output['redshift']) * field_of_view /
+               output["redshift"]) * field_of_view /
                proper_box_size).in_units("")
             
             frb = _light_cone_projection(output, field, pixels,
@@ -416,22 +416,22 @@
                 # Decrement the flux by the luminosity distance.
                 # Assume field in frb is in erg/s/cm^2/Hz
                 dL = self.cosmology.luminosity_distance(self.observer_redshift,
-                                                        output['redshift'])
+                                                        output["redshift"])
                 proper_box_size = self.simulation.box_size / \
-                  (1.0 + output['redshift'])
+                  (1.0 + output["redshift"])
                  pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
                 factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                 mylog.info("Distance to slice = %s" % dL)
-                frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
+                frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
             if weight_field is None:
-                my_storage.result = {'field': frb[field]}
+                my_storage.result = {"field": frb[field]}
             else:
-                my_storage.result = {'field': (frb[field] *
-                                               frb['weight_field']),
-                                     'weight_field': frb['weight_field']}
+                my_storage.result = {"field": (frb[field] *
+                                               frb["weight_field"]),
+                                     "weight_field": frb["weight_field"]}
 
-            del output['object']
+            del output["object"]
 
         # Combine results from each slice.
         all_slices = all_storage.keys()
@@ -443,16 +443,16 @@
                                     (self.output_prefix,
                                      my_slice, len(self.light_cone_solution)))
                 if weight_field is None:
-                    my_image = all_storage[my_slice]['field']
+                    my_image = all_storage[my_slice]["field"]
                 else:
-                    my_image = all_storage[my_slice]['field'] / \
-                      all_storage[my_slice]['weight_field']
+                    my_image = all_storage[my_slice]["field"] / \
+                      all_storage[my_slice]["weight_field"]
                 only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
-            projection_stack.append(all_storage[my_slice]['field'])
+            projection_stack.append(all_storage[my_slice]["field"])
             if weight_field is not None:
-                projection_weight_stack.append(all_storage[my_slice]['weight_field'])
+                projection_weight_stack.append(all_storage[my_slice]["weight_field"])
 
         projection_stack = self.simulation.arr(projection_stack)
         projection_weight_stack = self.simulation.arr(projection_weight_stack)
@@ -528,8 +528,8 @@
 
         # Clean pf objects out of light cone solution.
         for my_slice in self.light_cone_solution:
-            if my_slice.has_key('object'):
-                del my_slice['object']
+            if my_slice.has_key("object"):
+                del my_slice["object"]
 
         if recycle:
             mylog.debug("Recycling solution made with %s with new seed %s." %
@@ -564,7 +564,7 @@
             # than 1 after this slice.
             if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
                     (box_fraction_used > self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used + self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
+                    (box_fraction_used + self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
@@ -574,63 +574,63 @@
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
-                newAxis = self.light_cone_solution[q-1]['projection_axis']
-                newCenter = copy.deepcopy(self.light_cone_solution[q-1]['projection_center'])
+                newAxis = self.light_cone_solution[q-1]["projection_axis"]
+                newCenter = copy.deepcopy(self.light_cone_solution[q-1]["projection_center"])
                 newCenter[newAxis] += \
-                    0.5 * (self.light_cone_solution[q]['box_depth_fraction'] +
-                           self.light_cone_solution[q-1]['box_depth_fraction'])
+                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
+                           self.light_cone_solution[q-1]["box_depth_fraction"])
                 if newCenter[newAxis] >= 1.0:
                     newCenter[newAxis] -= 1.0
 
             if recycle:
-                output['projection_axis'] = self.master_solution[q]['projection_axis']
+                output["projection_axis"] = self.master_solution[q]["projection_axis"]
             else:
-                output['projection_axis'] = newAxis
+                output["projection_axis"] = newAxis
 
-            box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
+            box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]
 
             # Make list of rectangle corners to calculate common volume.
             newCube = np.zeros(shape=(len(newCenter), 2))
             oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
-                if (w == self.master_solution[q]['projection_axis']):
-                    oldCube[w] = [self.master_solution[q]['projection_center'][w] -
-                                  0.5 * self.master_solution[q]['box_depth_fraction'],
-                                  self.master_solution[q]['projection_center'][w] +
-                                  0.5 * self.master_solution[q]['box_depth_fraction']]
+                if (w == self.master_solution[q]["projection_axis"]):
+                    oldCube[w] = [self.master_solution[q]["projection_center"][w] -
+                                  0.5 * self.master_solution[q]["box_depth_fraction"],
+                                  self.master_solution[q]["projection_center"][w] +
+                                  0.5 * self.master_solution[q]["box_depth_fraction"]]
                 else:
-                    oldCube[w] = [self.master_solution[q]['projection_center'][w] -
-                                  0.5 * self.master_solution[q]['box_width_fraction'],
-                                  self.master_solution[q]['projection_center'][w] +
-                                  0.5 * self.master_solution[q]['box_width_fraction']]
+                    oldCube[w] = [self.master_solution[q]["projection_center"][w] -
+                                  0.5 * self.master_solution[q]["box_width_fraction"],
+                                  self.master_solution[q]["projection_center"][w] +
+                                  0.5 * self.master_solution[q]["box_width_fraction"]]
 
-                if (w == output['projection_axis']):
+                if (w == output["projection_axis"]):
                     if recycle:
                         newCube[w] = oldCube[w]
                     else:
                         newCube[w] = \
                           [newCenter[w] -
-                           0.5 * self.master_solution[q]['box_depth_fraction'],
+                           0.5 * self.master_solution[q]["box_depth_fraction"],
                            newCenter[w] +
-                           0.5 * self.master_solution[q]['box_depth_fraction']]
+                           0.5 * self.master_solution[q]["box_depth_fraction"]]
                 else:
                     newCube[w] = [newCenter[w] -
-                                  0.5 * self.master_solution[q]['box_width_fraction'],
+                                  0.5 * self.master_solution[q]["box_width_fraction"],
                                   newCenter[w] +
-                                  0.5 * self.master_solution[q]['box_width_fraction']]
+                                  0.5 * self.master_solution[q]["box_width_fraction"]]
 
             my_volume += common_volume(oldCube, newCube,
                                            periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
-            total_volume += output['box_depth_fraction'] * \
-              output['box_width_fraction']**2
+            total_volume += output["box_depth_fraction"] * \
+              output["box_width_fraction"]**2
 
             # Replace centers for every axis except the line of sight axis.
             for w in range(len(newCenter)):
                 if not(recycle and
-                       (w == self.light_cone_solution[q]['projection_axis'])):
-                    self.light_cone_solution[q]['projection_center'][w] = \
+                       (w == self.light_cone_solution[q]["projection_axis"])):
+                    self.light_cone_solution[q]["projection_center"][w] = \
                       newCenter[w]
 
         if recycle:
@@ -657,7 +657,7 @@
 
         mylog.info("Saving light cone solution to %s." % filename)
 
-        f = open(filename, 'w')
+        f = open(filename, "w")
         if self.recycle_solution:
             f.write("Recycled Solution\n")
             f.write("OriginalRandomSeed = %s\n" % self.original_random_seed)
@@ -669,10 +669,10 @@
         f.write("\n")
         for q, output in enumerate(self.light_cone_solution):
             f.write("Proj %04d, %s, z = %f, depth/box = %f, width/box = %f, axis = %d, center = %f, %f, %f\n" %
-                    (q, output['filename'], output['redshift'],
-                     output['box_depth_fraction'], output['box_width_fraction'],
-                     output['projection_axis'], output['projection_center'][0],
-                     output['projection_center'][1], output['projection_center'][2]))
+                    (q, output["filename"], output["redshift"],
+                     output["box_depth_fraction"], output["box_width_fraction"],
+                     output["projection_axis"], output["projection_center"][0],
+                     output["projection_center"][1], output["projection_center"][2]))
         f.close()
 
     @parallel_root_only
@@ -685,7 +685,7 @@
             attrs = {}
         
         # Make list of redshifts to include as a dataset attribute.
-        redshift_list = np.array([my_slice['redshift'] \
+        redshift_list = np.array([my_slice["redshift"] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -693,7 +693,7 @@
 
         if (filename is None):
             filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix)
-        if not(filename.endswith('.h5')):
+        if not(filename.endswith(".h5")):
                filename += ".h5"
 
         if pstack.size == 0:
@@ -711,8 +711,8 @@
         dataset = fh.create_dataset(field_node,
                                           data=pstack)
         dataset.attrs["units"] = str(pstack.units)
-        dataset.attrs['redshifts'] = redshift_list
-        dataset.attrs['observer_redshift'] = np.float(self.observer_redshift)
+        dataset.attrs["redshifts"] = redshift_list
+        dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
         for key, value in attrs.items():
             dataset.attrs[key] = value
 
@@ -724,8 +724,8 @@
             dataset = fh.create_dataset(weight_field_node,
                                         data=wstack)
             dataset.attrs["units"] = str(wstack.units)
-            dataset.attrs['redshifts'] = redshift_list
-            dataset.attrs['observer_redshift'] = np.float(self.observer_redshift)
+            dataset.attrs["redshifts"] = redshift_list
+            dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
             for key, value in attrs.items():
                 dataset.attrs[key] = value
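
As an aside, the stack written above can be read back with plain h5py; a minimal sketch, assuming the default output path and a field node named "szy_None" (the names follow the "%s_data" / ".h5" and "%s_%s" % (field, weight_field) patterns in this diff, but are otherwise assumptions):

    import h5py

    # Hypothetical path and node names; see the naming patterns in the diff above.
    with h5py.File("LC/LightCone_data.h5", "r") as fh:
        node = fh["szy_None"]
        stack = node[:]                         # stacked projections, one per slice
        print(node.attrs["units"])              # units string saved above
        print(node.attrs["redshifts"])          # redshift of each slice
        print(node.attrs["observer_redshift"])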
 


https://bitbucket.org/yt_analysis/yt/commits/3b8d39ed4b45/
Changeset:   3b8d39ed4b45
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 16:24:40
Summary:     Fixed image paneling issue.
Affected #:  1 file

diff -r ce25296b4e18552e9a962c1abd35db4085a3f1e1 -r 3b8d39ed4b4548a770a8d703294c72e69a3b1d14 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -111,7 +111,7 @@
     for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
         proj.field_data[my_field] = \
           my_slice["object"].arr(proj.field_data[my_field]).flatten()
-                
+
     # Delete originals.
     del original_px
     del original_py
@@ -124,10 +124,10 @@
     # Shift projection by random x and y offsets.
 
     image_axes = np.roll(np.arange(3), -my_slice["projection_axis"])[1:]
-    image_right_x = my_slice["object"].domain_right_edge[image_axes[0]]
-    image_left_x  = my_slice["object"].domain_left_edge[image_axes[0]]
-    image_right_y = my_slice["object"].domain_right_edge[image_axes[1]]
-    image_left_y  = my_slice["object"].domain_left_edge[image_axes[1]]
+    di_left_x  = my_slice["object"].domain_left_edge[image_axes[0]]
+    di_right_x = my_slice["object"].domain_right_edge[image_axes[0]]
+    di_left_y  = my_slice["object"].domain_left_edge[image_axes[1]]
+    di_right_y = my_slice["object"].domain_right_edge[image_axes[1]]
     
     offset = my_slice["projection_center"].copy() * \
       my_slice["object"].domain_width
@@ -138,19 +138,19 @@
     proj.field_data["py"] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj.field_data["px"][proj.field_data["px"] < image_left_x] += \
-      np.ceil(my_slice["box_width_fraction"]) * image_right_x
-    proj.field_data["py"][proj.field_data["py"] < image_left_y] += \
-      np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    proj.field_data["px"][proj.field_data["px"] < di_left_x] += \
+      np.ceil(my_slice["box_width_fraction"]) * di_right_x
+    proj.field_data["py"][proj.field_data["py"] < di_left_y] += \
+      np.ceil(my_slice["box_width_fraction"]) * di_right_y
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj.field_data["px"] + 0.5 * proj.field_data["pdx"] > \
-      np.ceil(my_slice["box_width_fraction"]) * image_right_x
+      np.ceil(my_slice["box_width_fraction"]) * di_right_x
     add_x_px = proj.field_data["px"][add_x_right]
-    add_x_px -= np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    add_x_px -= np.ceil(my_slice["box_width_fraction"]) * di_right_x
     add_x_py = proj.field_data["py"][add_x_right]
     add_x_pdx = proj.field_data["pdx"][add_x_right]
     add_x_pdy = proj.field_data["pdy"][add_x_right]
@@ -159,9 +159,9 @@
     del add_x_right
 
     # Cells hanging off the left edge.
-    add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < image_left_x
+    add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < di_left_x
     add2_x_px = proj.field_data["px"][add_x_left]
-    add2_x_px += np.ceil(my_slice["box_width_fraction"]) * image_right_x
+    add2_x_px += np.ceil(my_slice["box_width_fraction"]) * di_right_x
     add2_x_py = proj.field_data["py"][add_x_left]
     add2_x_pdx = proj.field_data["pdx"][add_x_left]
     add2_x_pdy = proj.field_data["pdy"][add_x_left]
@@ -171,10 +171,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj.field_data["py"] + 0.5 * proj.field_data["pdy"] > \
-      np.ceil(my_slice["box_width_fraction"]) * image_right_y
+      np.ceil(my_slice["box_width_fraction"]) * di_right_y
     add_y_px = proj.field_data["px"][add_y_right]
     add_y_py = proj.field_data["py"][add_y_right]
-    add_y_py -= np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    add_y_py -= np.ceil(my_slice["box_width_fraction"]) * di_right_y
     add_y_pdx = proj.field_data["pdx"][add_y_right]
     add_y_pdy = proj.field_data["pdy"][add_y_right]
     add_y_field = proj.field_data[proj_field][add_y_right]
@@ -182,10 +182,10 @@
     del add_y_right
 
     # Cells hanging off the bottom edge.
-    add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < image_left_y
+    add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < di_left_y
     add2_y_px = proj.field_data["px"][add_y_left]
     add2_y_py = proj.field_data["py"][add_y_left]
-    add2_y_py += np.ceil(my_slice["box_width_fraction"]) * image_right_y
+    add2_y_py += np.ceil(my_slice["box_width_fraction"]) * di_right_y
     add2_y_pdx = proj.field_data["pdx"][add_y_left]
     add2_y_pdy = proj.field_data["pdy"][add_y_left]
     add2_y_field = proj.field_data[proj_field][add_y_left]
@@ -224,7 +224,8 @@
     # Tiles were made rounding up the width to the nearest integer.
     # Cut off the edges to get the specified width.
     # Cut in the x direction.
-    cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < image_right_x
+    cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < \
+      di_right_x * my_slice["box_width_fraction"]
     proj.field_data["px"] = proj.field_data["px"][cut_x]
     proj.field_data["py"] = proj.field_data["py"][cut_x]
     proj.field_data["pdx"] = proj.field_data["pdx"][cut_x]
@@ -234,7 +235,8 @@
     del cut_x
 
     # Cut in the y direction.
-    cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < image_right_y
+    cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < \
+      di_right_y * my_slice["box_width_fraction"]
     proj.field_data["px"] = proj.field_data["px"][cut_y]
     proj.field_data["py"] = proj.field_data["py"][cut_y]
     proj.field_data["pdx"] = proj.field_data["pdx"][cut_y]
@@ -245,13 +247,9 @@
 
     # Create fixed resolution buffer to return back to the light cone object.
     # These buffers will be stacked together to make the light cone.
-    # frb = FixedResolutionBuffer(proj, (0, my_slice["box_width_fraction"],
-    #                                    0, my_slice["box_width_fraction"]),
     frb = FixedResolutionBuffer(proj, 
-                                (image_left_x,
-                                 image_right_x * my_slice["box_width_fraction"],
-                                 image_left_y,
-                                 image_right_y * my_slice["box_width_fraction"]),
-                                (pixels, pixels), antialias=False)
+        (di_left_x, di_right_x * my_slice["box_width_fraction"],
+         di_left_y, di_right_y * my_slice["box_width_fraction"]),
+        (pixels, pixels), antialias=False)
 
     return frb
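
The wrapping applied to px/py above is an ordinary periodic shift onto the tiled image, where n_tiles plays the role of np.ceil(box_width_fraction); a minimal standalone sketch of the same operation (illustrative names, not the yt API):

    import numpy as np

    def wrap_left_edge(px, left_edge, right_edge, n_tiles):
        """Move cell centers that fall off the left edge back onto the
        n_tiles-wide tiled image, as done for px and py above."""
        px = px.copy()
        px[px < left_edge] += n_tiles * right_edge
        return px

    px = np.array([-0.2, 0.1, 0.7])
    print(wrap_left_edge(px, 0.0, 1.0, 1))  # [ 0.8  0.1  0.7]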


https://bitbucket.org/yt_analysis/yt/commits/d16b36e4ab79/
Changeset:   d16b36e4ab79
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 17:07:47
Summary:     Updating light cone recipe.
Affected #:  1 file

diff -r 3b8d39ed4b4548a770a8d703294c72e69a3b1d14 -r d16b36e4ab792e0d1ee27b86830845cac85d2dd7 doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,12 +1,8 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+import yt
+from yt.analysis_modules.cosmological_observation.light_cone.light_cone import \
+     LightCone
 
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Create a LightCone object extending from z = 0 to z = 0.1
-# with a 600 arcminute field of view and a resolution of
-# 60 arcseconds.
+# Create a LightCone object extending from z = 0 to z = 0.1.
 
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
@@ -14,20 +10,20 @@
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                'Enzo', 0., 0.1,
                observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
                time_data=False)
 
 # Calculate a randomization of the solution.
 lc.calculate_light_cone_solution(seed=123456789)
 
 # Choose the field to be projected.
-field = 'SZY'
+field = 'szy'
 
+# Use the LightCone object to make a projection with a 600 arcminute 
+# field of view and a resolution of 60 arcseconds.
 # Set njobs to -1 to have one core work on each projection
-# in parallel.  Set save_slice_images to True to see an
-# image for each individual slice.
-lc.project_light_cone(field, save_stack=False,
+# in parallel.
+lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
+                      save_stack=True,
                       save_final_image=True,
-                      save_slice_images=False,
+                      save_slice_images=True,
                       njobs=-1)
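
Note the interface change the updated recipe relies on: the field of view and image resolution are no longer fixed when the LightCone is constructed, but are passed to project_light_cone as (value, unit) tuples, e.g. (600.0, "arcmin") and (60.0, "arcsec"), presumably so the same solution can be projected at more than one angular scale.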


https://bitbucket.org/yt_analysis/yt/commits/6c29b43c921c/
Changeset:   6c29b43c921c
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 17:15:55
Summary:     Limiting line length.
Affected #:  1 file

diff -r d16b36e4ab792e0d1ee27b86830845cac85d2dd7 -r 6c29b43c921c1f2dc9ac619fdfa05fbabfe12768 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -201,10 +201,12 @@
             # Simple error check to make sure more than 100% of box depth
             # is never required.
             if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
-                mylog.debug("Warning: box fraction required to go from z = %f to %f is %f" %
+                mylog.debug(("Warning: box fraction required to go from " +
+                             "z = %f to %f is %f") %
                             (self.light_cone_solution[q]["redshift"], z_next,
                              self.light_cone_solution[q]["box_depth_fraction"]))
-                mylog.debug("Full box delta z is %f, but it is %f to the next data dump." %
+                mylog.debug(("Full box delta z is %f, but it is %f to the " +
+                             "next data dump.") %
                             (self.light_cone_solution[q]["deltazMax"],
                              self.light_cone_solution[q]["redshift"]-z_next))
 
@@ -564,7 +566,8 @@
             # than 1 after this slice.
             if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
                     (box_fraction_used > self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used + self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
+                    (box_fraction_used +
+                     self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
@@ -573,9 +576,11 @@
                 newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
-                # Same axis and center as previous slice, but with depth center shifted.
+                # Same axis and center as previous slice, 
+                # but with depth center shifted.
                 newAxis = self.light_cone_solution[q-1]["projection_axis"]
-                newCenter = copy.deepcopy(self.light_cone_solution[q-1]["projection_center"])
+                newCenter = \
+                  copy.deepcopy(self.light_cone_solution[q-1]["projection_center"])
                 newCenter[newAxis] += \
                     0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
                            self.light_cone_solution[q-1]["box_depth_fraction"])
@@ -634,10 +639,12 @@
                       newCenter[w]
 
         if recycle:
-            mylog.debug("Fractional common volume between master and recycled solution is %.2e" % \
+            mylog.debug(("Fractional common volume between master and " +
+                         "recycled solution is %.2e") %
                         (my_volume / total_volume))
         else:
-            mylog.debug("Fraction of total volume in common with old solution is %.2e." % \
+            mylog.debug("Fraction of total volume in common with old " +
+                        "solution is %.2e." %
                         (my_volume / total_volume))
             self.master_solution = [copy.deepcopy(q) \
                                     for q in self.light_cone_solution]
@@ -668,7 +675,8 @@
         f.write("parameter_filename = %s\n" % self.parameter_filename)
         f.write("\n")
         for q, output in enumerate(self.light_cone_solution):
-            f.write("Proj %04d, %s, z = %f, depth/box = %f, width/box = %f, axis = %d, center = %f, %f, %f\n" %
+            f.write(("Proj %04d, %s, z = %f, depth/box = %f, " +
+                    "width/box = %f, axis = %d, center = %f, %f, %f\n") %
                     (q, output["filename"], output["redshift"],
                      output["box_depth_fraction"], output["box_width_fraction"],
                      output["projection_axis"], output["projection_center"][0],


https://bitbucket.org/yt_analysis/yt/commits/48a7011ba966/
Changeset:   48a7011ba966
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 17:48:07
Summary:     Removing solution recycling.
Affected #:  1 file

diff -r 6c29b43c921c1f2dc9ac619fdfa05fbabfe12768 -r 48a7011ba96664362866beff33f117fad3ece06a yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -33,8 +33,7 @@
 from yt.visualization.image_writer import \
     write_image
 from yt.units.yt_array import \
-    YTArray, \
-    YTQuantity
+    YTArray
 from .common_n_volume import \
     common_volume
 from .halo_mask import \
@@ -117,16 +116,8 @@
         self.output_dir = output_dir
         self.output_prefix = output_prefix
 
-        self.master_solution = [] # kept to compare with recycled solutions
         self.halo_mask = []
 
-        # Original random seed of the first solution.
-        self.original_random_seed = 0
-
-        # Parameters for recycling light cone solutions.
-        self.recycle_solution = False
-        self.recycle_random_seed = 0
-
         # Create output directory.
         if not os.path.exists(self.output_dir):
             only_on_root(os.mkdir, self.output_dir)
@@ -166,18 +157,13 @@
                        "minimal light cone.")
             self.minimum_coherent_box_fraction = 0
 
-        # Make sure recycling flag is off.
-        self.recycle_solution = False
-
         # Get rid of old halo mask, if one was there.
         self.halo_mask = []
 
-        if seed is not None:
-            self.original_random_seed = int(seed)
-
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        np.random.seed(self.original_random_seed)
+        seed = int(seed)
+        np.random.seed(seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -242,10 +228,6 @@
 
             box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]
 
-        # Store this as the master solution.
-        self.master_solution = [copy.deepcopy(q) \
-                                for q in self.light_cone_solution]
-
         # Write solution to a file.
         if filename is not None:
             self._save_light_cone_solution(filename=filename)
@@ -491,173 +473,6 @@
                 attrs={"field_of_view": str(field_of_view),
                        "image_resolution": str(image_resolution)})
 
-    def rerandomize_light_cone_solution(self, new_seed, recycle=True, filename=None):
-        """
-        When making a projection for a light cone, only randomizations along the
-        line of sight make any given projection unique, since the lateral shifting
-        and tiling are done after the projection is made.  Therefore, multiple light
-        cones can be made from a single set of projections by introducing different
-        lateral random shifts and keeping all the original shifts along the line of
-        sight.  This routine will take in a new random seed and rerandomize the
-        parts of the light cone that do not contribute to creating a unique
-        projection object.  Additionally, this routine is built such that if the
-        same random seed is given for the rerandomizing, the solution will be
-        identical to the original.
-
-        This routine has now been updated to be a general solution rescrambler.
-        If the keyword recycle is set to True, then it will recycle.  Otherwise, it
-        will create a completely new solution.
-
-        new_seed : float
-            The new random seed.
-        recycle : bool
-            If True, the new solution will have the same shift in the line of
-            sight as the original solution.  Since the projections of each
-            slice are serialized and stored for the entire width of the box
-            (even if the width used is less than the total box), the projection
-            data can be deserialized instead of being remade from scratch.
-            This can greatly speed up the creation of a large number of light
-            cone projections.
-            Default: True.
-        filename : string
-            If given, a text file detailing the solution will be written out.
-            Default: None.
-
-        """
-
-        # Get rid of old halo mask, if one was there.
-        self.halo_mask = []
-
-        # Clean pf objects out of light cone solution.
-        for my_slice in self.light_cone_solution:
-            if my_slice.has_key("object"):
-                del my_slice["object"]
-
-        if recycle:
-            mylog.debug("Recycling solution made with %s with new seed %s." %
-                        (self.original_random_seed, new_seed))
-            self.recycle_random_seed = int(new_seed)
-        else:
-            mylog.debug("Creating new solution with random seed %s." % new_seed)
-            self.original_random_seed = int(new_seed)
-            self.recycle_random_seed = 0
-
-        self.recycle_solution = recycle
-
-        # Keep track of fraction of volume in common between the original and
-        # recycled solution.
-        my_volume = 0.0
-        total_volume = 0.0
-
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
-
-        # Seed random number generator with new seed.
-        np.random.seed(int(new_seed))
-
-        for q, output in enumerate(self.light_cone_solution):
-            # It is necessary to make the same number of calls to the random
-            # number generator so the original solution will be produced if the
-            # same seed is given.
-
-            # Get projection axis and center.
-            # If using box coherence, only get random axis and center if enough
-            # of the box has been used, or if box_fraction_used will be greater
-            # than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used > self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
-                # Get random projection axis and center.
-                # If recycling, axis will get thrown away since it is used in
-                # creating a unique projection object.
-                newAxis = np.random.randint(0, 3)
-
-                newCenter = [np.random.random() for i in range(3)]
-                box_fraction_used = 0.0
-            else:
-                # Same axis and center as previous slice, 
-                # but with depth center shifted.
-                newAxis = self.light_cone_solution[q-1]["projection_axis"]
-                newCenter = \
-                  copy.deepcopy(self.light_cone_solution[q-1]["projection_center"])
-                newCenter[newAxis] += \
-                    0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
-                           self.light_cone_solution[q-1]["box_depth_fraction"])
-                if newCenter[newAxis] >= 1.0:
-                    newCenter[newAxis] -= 1.0
-
-            if recycle:
-                output["projection_axis"] = self.master_solution[q]["projection_axis"]
-            else:
-                output["projection_axis"] = newAxis
-
-            box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]
-
-            # Make list of rectangle corners to calculate common volume.
-            newCube = np.zeros(shape=(len(newCenter), 2))
-            oldCube = np.zeros(shape=(len(newCenter), 2))
-            for w in range(len(newCenter)):
-                if (w == self.master_solution[q]["projection_axis"]):
-                    oldCube[w] = [self.master_solution[q]["projection_center"][w] -
-                                  0.5 * self.master_solution[q]["box_depth_fraction"],
-                                  self.master_solution[q]["projection_center"][w] +
-                                  0.5 * self.master_solution[q]["box_depth_fraction"]]
-                else:
-                    oldCube[w] = [self.master_solution[q]["projection_center"][w] -
-                                  0.5 * self.master_solution[q]["box_width_fraction"],
-                                  self.master_solution[q]["projection_center"][w] +
-                                  0.5 * self.master_solution[q]["box_width_fraction"]]
-
-                if (w == output["projection_axis"]):
-                    if recycle:
-                        newCube[w] = oldCube[w]
-                    else:
-                        newCube[w] = \
-                          [newCenter[w] -
-                           0.5 * self.master_solution[q]["box_depth_fraction"],
-                           newCenter[w] +
-                           0.5 * self.master_solution[q]["box_depth_fraction"]]
-                else:
-                    newCube[w] = [newCenter[w] -
-                                  0.5 * self.master_solution[q]["box_width_fraction"],
-                                  newCenter[w] +
-                                  0.5 * self.master_solution[q]["box_width_fraction"]]
-
-            my_volume += common_volume(oldCube, newCube,
-                                           periodic=np.array([[0, 1],
-                                                              [0, 1],
-                                                              [0, 1]]))
-            total_volume += output["box_depth_fraction"] * \
-              output["box_width_fraction"]**2
-
-            # Replace centers for every axis except the line of sight axis.
-            for w in range(len(newCenter)):
-                if not(recycle and
-                       (w == self.light_cone_solution[q]["projection_axis"])):
-                    self.light_cone_solution[q]["projection_center"][w] = \
-                      newCenter[w]
-
-        if recycle:
-            mylog.debug(("Fractional common volume between master and " +
-                         "recycled solution is %.2e") %
-                        (my_volume / total_volume))
-        else:
-            mylog.debug("Fraction of total volume in common with old " +
-                        "solution is %.2e." %
-                        (my_volume / total_volume))
-            self.master_solution = [copy.deepcopy(q) \
-                                    for q in self.light_cone_solution]
-
-        # Write solution to a file.
-        if filename is not None:
-            self._save_light_cone_solution(filename=filename)
-
-    def restore_master_solution(self):
-        "Reset the active light cone solution to the master solution."
-        self.light_cone_solution = [copy.deepcopy(q) \
-                                    for q in self.master_solution]
-
     @parallel_root_only
     def _save_light_cone_solution(self, filename="light_cone.dat"):
         "Write out a text file with information on light cone solution."
@@ -665,13 +480,6 @@
         mylog.info("Saving light cone solution to %s." % filename)
 
         f = open(filename, "w")
-        if self.recycle_solution:
-            f.write("Recycled Solution\n")
-            f.write("OriginalRandomSeed = %s\n" % self.original_random_seed)
-            f.write("RecycleRandomSeed = %s\n" % self.recycle_random_seed)
-        else:
-            f.write("Original Solution\n")
-            f.write("OriginalRandomSeed = %s\n" % self.original_random_seed)
         f.write("parameter_filename = %s\n" % self.parameter_filename)
         f.write("\n")
         for q, output in enumerate(self.light_cone_solution):
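
With the recycling machinery gone, the per-slice randomization reduces to the direct numpy calls kept in calculate_light_cone_solution; condensed as a sketch:

    import numpy as np

    np.random.seed(int(123456789))        # seed is now cast and used directly
    new_axis = np.random.randint(0, 3)    # random projection axis
    new_center = [np.random.random() for i in range(3)]  # random projection center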


https://bitbucket.org/yt_analysis/yt/commits/63469f33d0af/
Changeset:   63469f33d0af
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 17:55:44
Summary:     Removing functionality to create unique light cones.
Affected #:  5 files

diff -r 48a7011ba96664362866beff33f117fad3ece06a -r 63469f33d0af17f4cb9512e698802847ebc41221 doc/source/cookbook/unique_light_cone_projections.py
--- a/doc/source/cookbook/unique_light_cone_projections.py
+++ /dev/null
@@ -1,34 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Instantiate a light cone.
-lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", 'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=120.0,
-               image_resolution_in_arcseconds=60.0,
-               use_minimum_datasets=True,
-               time_data=False,
-               output_dir='LC_U', output_prefix='LightCone')
-
-# Try to find 10 solutions that have at most 10% volume in
-# common and give up after 50 consecutive failed attempts.
-# The recycle=True setting tells the code to first attempt
-# to use solutions with the same projection axes as other
-# solutions.  This will save time when making the projection.
-yt.find_unique_solutions(lc, max_overlap=0.10, failures=50,
-                         seed=123456789, recycle=True,
-                         solutions=10, filename='LC_U/unique.dat')
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make light cone projections with each of the random seeds
-# found above.  All output files will be written with unique
-# names based on the random seed numbers.
-yt.project_unique_light_cones(lc, 'LC_U/unique.dat', field,
-                              save_stack=False,
-                              save_final_image=True,
-                              save_slice_images=False)

diff -r 48a7011ba96664362866beff33f117fad3ece06a -r 63469f33d0af17f4cb9512e698802847ebc41221 yt/analysis_modules/cosmological_observation/api.py
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -17,9 +17,7 @@
     CosmologySplice
 
 from .light_cone.api import \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones
+    LightCone
 
 from .light_ray.api import \
     LightRay

diff -r 48a7011ba96664362866beff33f117fad3ece06a -r 63469f33d0af17f4cb9512e698802847ebc41221 yt/analysis_modules/cosmological_observation/light_cone/api.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/api.py
@@ -1,5 +1,5 @@
 """
-API for lightcone
+API for light_cone
 
 
 
@@ -15,7 +15,3 @@
 
 from .light_cone import \
     LightCone
-
-from .unique_solution import \
-    project_unique_light_cones, \
-    find_unique_solutions

diff -r 48a7011ba96664362866beff33f117fad3ece06a -r 63469f33d0af17f4cb9512e698802847ebc41221 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Function to calculate volume in common between two n-cubes, with optional
-periodic boundary conditions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-def common_volume(n_cube_1, n_cube_2, periodic=None):
-    "Return the n-volume in common between the two n-cubes."
-
-    # Check for proper args.
-    if ((len(np.shape(n_cube_1)) != 2) or
-        (np.shape(n_cube_1)[1] != 2) or
-        (np.shape(n_cube_1) != np.shape(n_cube_2))):
-        print "Arguments must be 2 (n, 2) numpy array."
-        return 0
-
-    if ((periodic is not None) and
-        (np.shape(n_cube_1) != np.shape(periodic))):
-        print "periodic argument must be (n, 2) numpy array."
-        return 0
-
-    nCommon = 1.0
-    for q in range(np.shape(n_cube_1)[0]):
-        if (periodic is None):
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
-        else:
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q],
-                                      periodic=periodic[q])
-
-    return nCommon
-
-def common_segment(seg1, seg2, periodic=None):
-    "Return the length of the common segment."
-
-    # Check for proper args.
-    if ((len(seg1) != 2) or (len(seg2) != 2)):
-        print "Arguments must be arrays of size 2."
-        return 0
-
-    # If not periodic, then this is very easy.
-    if periodic is None:
-        seg1.sort()
-        len1 = seg1[1] - seg1[0]
-        seg2.sort()
-        len2 = seg2[1] - seg2[0]
-
-        common = 0.0
-
-        add = seg1[1] - seg2[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        add = seg2[1] - seg1[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        common = min(common, len1, len2)
-        return common
-
-    # If periodic, it's a little more complicated.
-    else:
-        if len(periodic) != 2:
-            print "periodic array must be of size 2."
-            return 0
-
-        seg1.sort()
-        flen1 = seg1[1] - seg1[0]
-        len1 = flen1 - int(flen1)
-        seg2.sort()
-        flen2 = seg2[1] - seg2[0]
-        len2 = flen2 - int(flen2)
-
-        periodic.sort()
-        scale = periodic[1] - periodic[0]
-
-        if (abs(int(flen1)-int(flen2)) >= scale):
-            return min(flen1, flen2)
-
-        # Adjust for periodicity
-        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
-        seg1[1] = seg1[0] + len1
-        if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
-        seg2[1] = seg2[0] + len2
-        if (seg2[1] > periodic[1]): seg2[1] -= scale
-
-        # create list of non-periodic segments
-        pseg1 = []
-        if (seg1[0] >= seg1[1]):
-            pseg1.append([seg1[0], periodic[1]])
-            pseg1.append([periodic[0], seg1[1]])
-        else:
-            pseg1.append(seg1)
-        pseg2 = []
-        if (seg2[0] >= seg2[1]):
-            pseg2.append([seg2[0], periodic[1]])
-            pseg2.append([periodic[0], seg2[1]])
-        else:
-            pseg2.append(seg2)
-
-        # Add up common segments.
-        common = min(int(flen1), int(flen2))
-
-        for subseg1 in pseg1:
-            for subseg2 in pseg2:
-                common += common_segment(subseg1, subseg2)
-
-        return common
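
For reference, the non-periodic core of the module removed above is the standard interval-overlap formula, which the add-based common_segment logic implements; a minimal sketch (the deleted version additionally handles periodic wrapping):

    def segment_overlap(seg1, seg2):
        """Length common to two 1-D intervals."""
        (a0, a1), (b0, b1) = sorted(seg1), sorted(seg2)
        return max(0.0, min(a1, b1) - max(a0, b0))

    def cube_overlap(cube1, cube2):
        """n-volume common to two axis-aligned n-cubes given as (n, 2) pairs."""
        volume = 1.0
        for seg1, seg2 in zip(cube1, cube2):
            volume *= segment_overlap(seg1, seg2)
        return volume

    print(cube_overlap([[0.0, 0.5], [0.2, 0.8]],
                       [[0.25, 0.75], [0.0, 0.5]]))  # 0.25 * 0.3 = 0.075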

diff -r 48a7011ba96664362866beff33f117fad3ece06a -r 63469f33d0af17f4cb9512e698802847ebc41221 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ /dev/null
@@ -1,275 +0,0 @@
-"""
-Functions to generate unique light cone solutions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import numpy as np
-import random as rand
-import sys
-
-from yt.funcs import *
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_root_only
-
-from .light_cone import \
-     LightCone
-from .common_n_volume import \
-     common_volume
-
-def project_unique_light_cones(lightcone, seed_file, field, **kwargs):
-    r"""Make light cone projections using list of random
-    seeds in a file.
-
-    Parameters
-    ----------
-    lightcone : LightCone
-        A LightCone object.
-    seed_file : str
-        File containing random seeds for light cone projections.
-    field : str
-        The field to be projected.
-
-    Any additional parameters to project_light_cone should be
-    given here.
-    """
-
-    seedList = _read_seed_file(seed_file)
-
-    prefix = lightcone.output_prefix
-    lightcone.calculate_light_cone_solution(seed=0)
-    lastSeed = None
-
-    for seed in seedList:
-        if (seed['master'] != lastSeed):
-            lightcone.rerandomize_light_cone_solution(seed['master'],
-                                                      recycle=False)
-            lastSeed = seed['master']
-        if (seed['recycle'] is not None):
-            lightcone.rerandomize_light_cone_solution(seed['recycle'],
-                                                      recycle=True)
-
-        lightcone.output_prefix = "%s_%s_%s" % \
-          (prefix, seed['master'], seed['recycle'])
-        lightcone.project_light_cone(field, **kwargs)
-
-def find_unique_solutions(lightcone1, solutions=10, seed=None,
-                          max_overlap=0.25, failures=10,
-                          recycle=True, filename='unique.dat'):
-    r"""Find a set of random seeds that will give light cones
-    with minimal volume overlap.
-
-    Parameters
-    ----------
-    lightcone1 : LightCone
-        A LightCone object.
-    solutions : int
-        The desired number of unique solutions.
-        Default: 10
-    seed : int
-        The random seed for generating light cone randomizations.
-        Default: None
-    max_overlap : float
-        The maximum fractional volume an accepted solution can have
-        in common with the other solutions.
-        Default: 0.25
-    failures : int
-        The number of failed attempts before ending.
-        Default: 10
-    recycle : bool
-        If True, attempt to use existing realizations.  This
-        will speed up light cone projections.
-        Default: True
-    filename : str
-        Output file name for the list of unique seeds.
-        Default : 'unique.dat'
-
-    """
-
-    lightcone2 = copy.deepcopy(lightcone1)
-    lightcone1.calculate_light_cone_solution(seed=0)
-    lightcone2.calculate_light_cone_solution(seed=0)
-
-    unique_seeds = []
-    if recycle:
-        master = None
-    new_recycle_seed = None
-    fails = 0
-    recycle_fails = 0
-
-    max_common = 0.0
-
-    # Need to continually save and reset the state of the random
-    # number generator since it is being reset by the light
-    # cone generator.
-    if seed is None:
-        state = None
-    else:
-        rand.seed(seed)
-        state = rand.getstate()
-
-    fail_digits = str(int(np.log10(failures))+1)
-
-    while (len(unique_seeds) < solutions):
-        # Create new random seed.
-        if (recycle and master is not None):
-            new_seed = master
-            if state is not None: rand.setstate(state)
-            new_recycle_seed = rand.randint(1, 1e9)
-            state = rand.getstate()
-        else:
-            if state is not None: rand.setstate(state)
-            new_seed = rand.randint(1, 1e9)
-            state = rand.getstate()
-            if recycle:
-                master = new_seed
-                recycle_fails = 0
-            new_recycle_seed = None
-
-        sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+fail_digits+"d, %"+fail_digits+"d.\r") % \
-                             (len(unique_seeds), fails, recycle_fails))
-
-        lightcone1.rerandomize_light_cone_solution(new_seed,
-                                                   recycle=False)
-        if new_recycle_seed is not None:
-            lightcone1.rerandomize_light_cone_solution(new_recycle_seed,
-                                                       recycle=True)
-
-        # Compare with all other seeds.
-        test_pass = True
-        for unique_seed in unique_seeds:
-            lightcone2.rerandomize_light_cone_solution(unique_seed['master'],
-                                                       recycle=False)
-            if unique_seed['recycle'] is not None:
-                lightcone2.rerandomize_light_cone_solution(unique_seed['recycle'],
-                                                           recycle=True)
-
-            common = _compare_solutions(lightcone1.light_cone_solution,
-                                        lightcone2.light_cone_solution)
-
-            if (common > max_overlap):
-                test_pass = False
-                break
-            else:
-                max_common = max(max_common, common)
-
-        if test_pass:
-            unique_seeds.append({'master':new_seed,
-                                 'recycle':new_recycle_seed})
-            fails = 0
-            recycle_fails = 0
-
-        else:
-            if recycle:
-                recycle_fails += 1
-            else:
-                fails += 1
-
-            if (recycle_fails >= failures):
-                sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+fail_digits+"d, %"+fail_digits+"d.\n") % \
-                                 (len(unique_seeds), fails, recycle_fails))
-                fails += 1
-                mylog.info("Max recycled failures reached with master seed %d." % \
-                           new_seed)
-                master = None
-            if (fails >= failures):
-                sys.stderr.write(("Unique solutions: %d, consecutive failures: %"+fail_digits+"d, %"+fail_digits+"d.\n") % \
-                                 (len(unique_seeds), fails, recycle_fails))
-                mylog.error("Max consecutive failures reached.")
-                break
-
-    mylog.info("Created %d unique solutions." % len(unique_seeds))
-    mylog.info("Maximum common volume is %.2e." % max_common)
-    _write_seed_file(unique_seeds, filename)
-    return unique_seeds
-
-def _compare_solutions(solution1, solution2):
-    "Calculate common volume between two light cone solutions."
-
-    if (len(solution1) != len(solution2)):
-        mylog.error("Cannot compare light cone solutions with unequal numbers of slices.")
-        return -1
-
-    my_volume = 0.0
-    total_volume = 0.0
-
-    # Check that solution volumes are the same.
-    if((solution1[0]['box_depth_fraction'] *
-        solution1[0]['box_width_fraction']**2) !=
-       (solution2[0]['box_depth_fraction'] *
-        solution2[0]['box_width_fraction']**2)):
-        mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
-
-    for q in range(len(solution1)):
-        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
-        volume1 = 1.0
-        for w in range(len(cube1)):
-            if (w == solution1[q]['projection_axis']):
-                width = solution1[q]['box_depth_fraction']
-            else:
-                width = solution1[q]['box_width_fraction']
-            volume1 *= width
-            cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
-                        solution1[q]['projection_center'][w] + 0.5 * width]
-
-        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
-        volume2 = 1.0
-        for w in range(len(cube2)):
-            if (w == solution2[q]['projection_axis']):
-                width = solution2[q]['box_depth_fraction']
-            else:
-                width = solution2[q]['box_width_fraction']
-            volume2 *= width
-            cube2[w] = [solution2[q]['projection_center'][w] - 0.5 * width,
-                        solution2[q]['projection_center'][w] + 0.5 * width]
-
-        total_volume += min(volume1, volume2)
-        my_volume += common_volume(cube1, cube2,
-                                   periodic=np.array([[0, 1],
-                                                      [0, 1],
-                                                      [0, 1]]))
-
-    return (my_volume / total_volume)
-
-def _read_seed_file(filename):
-    "Read list of random seeds from a file."
-
-    mylog.info("Reading random seed list from %s." % filename)
-
-    seed_list = []
-
-    lines = file(filename)
-    for line in lines:
-        if line[0] != '#':
-            line = line.strip()
-            onLine = line.split(',')
-            if (len(onLine) == 1):
-                seed_list.append({'master':onLine[0], 'recycle':None})
-            else:
-                seed_list.append({'master':onLine[0], 'recycle':onLine[1]})
-
-    return seed_list
-
- at parallel_root_only
-def _write_seed_file(seed_list, filename):
-    "Write list of random seeds to a file."
-
-    mylog.info("Writing random seed list to %s." % filename)
-
-    f = open(filename, 'w')
-    for seed in seed_list:
-        if seed['recycle'] is None:
-            f.write("%s\n" % seed['master'])
-        else:
-            f.write("%s,%s\n" % (seed['master'], seed['recycle']))
-    f.close()
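
The search loop deleted here is, at heart, rejection sampling over random seeds; a minimal sketch of the idea, with overlap_fraction standing in for the solution comparison above (a hypothetical callable, not part of yt):

    import random

    def find_unique_seeds(overlap_fraction, solutions=10,
                          max_overlap=0.25, failures=10, seed=None):
        """Draw seeds until enough are accepted, or until `failures`
        consecutive candidates overlap too much with the accepted set."""
        rng = random.Random(seed)
        accepted, fails = [], 0
        while len(accepted) < solutions and fails < failures:
            candidate = rng.randint(1, int(1e9))
            if all(overlap_fraction(candidate, s) <= max_overlap
                   for s in accepted):
                accepted.append(candidate)
                fails = 0
            else:
                fails += 1
        return accepted

    # Trivial usage: with zero overlap everywhere, every draw is accepted.
    print(find_unique_seeds(lambda a, b: 0.0, solutions=3, seed=42))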


https://bitbucket.org/yt_analysis/yt/commits/ee9744a977f7/
Changeset:   ee9744a977f7
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 18:04:30
Summary:     Removing light cone halo mask functionality.
Affected #:  3 files

diff -r 63469f33d0af17f4cb9512e698802847ebc41221 -r ee9744a977f7be05841ccf8b27c896a9c58ec21a doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ /dev/null
@@ -1,78 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-
-# Instantiate a light cone object as usual.
-lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
-               'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
-               time_data=False,
-               output_dir='LC_HM', output_prefix='LightCone')
-
-# Calculate the light cone solution.
-lc.calculate_light_cone_solution(seed=123456789,
-                                 filename='LC_HM/lightcone.dat')
-
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir': 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Get the halo list for the active solution of this light cone using
-# the HaloProfiler settings set up above.
-# Write the boolean map to an hdf5 file called 'halo_mask.h5'.
-# Write a text file detailing the location, redshift, radius, and mass
-# of each halo in light cone projection.
-lc.get_halo_mask(mask_file='LC_HM/halo_mask.h5',
-                 map_file='LC_HM/halo_map.out',
-                 cube_file='LC_HM/halo_cube.h5',
-                 virial_overdensity=100,
-                 halo_profiler_parameters=halo_profiler_parameters,
-                 njobs=1, dynamic=False)
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make the light cone projection and apply the halo mask.
-pc = lc.project_light_cone(field, save_stack=False,
-                           save_final_image=True,
-                           save_slice_images=False,
-                           apply_halo_mask=True)

diff -r 63469f33d0af17f4cb9512e698802847ebc41221 -r ee9744a977f7be05841ccf8b27c896a9c58ec21a yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Light cone halo mask functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import h5py
-import numpy as np
-
-from yt.funcs import *
-from yt.analysis_modules.halo_profiler.api import \
-     HaloProfiler
-from yt.convenience import load
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_objects, \
-     parallel_root_only
-
-def _light_cone_halo_mask(lightCone, cube_file=None,
-                          mask_file=None, map_file=None,
-                          halo_profiler_parameters=None,
-                          virial_overdensity=200,
-                          njobs=1, dynamic=False):
-    "Make a boolean mask to cut clusters out of light cone projections."
-
-    if halo_profiler_parameters is None:
-        halo_profiler_parameters = {}
-
-    pixels = int(lightCone.field_of_view_in_arcminutes * 60.0 /
-                 lightCone.image_resolution_in_arcseconds)
-
-    # Loop through files in light cone solution and get virial quantities.
-    halo_map_storage = {}
-    for my_storage, my_slice in \
-      parallel_objects(lightCone.light_cone_solution,
-                       njobs=njobs, dynamic=dynamic,
-                       storage=halo_map_storage):
-        halo_list = _get_halo_list(my_slice['filename'],
-                                   **halo_profiler_parameters)
-        my_storage.result = \
-          {'mask': _make_slice_mask(my_slice, halo_list, pixels,
-                                    virial_overdensity)}
-        if map_file is not None:
-            my_storage.result['map'] = \
-              _make_slice_halo_map(my_slice, halo_list,
-                                   virial_overdensity)
-
-    # Reassemble halo mask and map lists.
-    light_cone_mask = []
-    halo_map = []
-    all_slices = halo_map_storage.keys()
-    all_slices.sort()
-    for i in all_slices:
-        light_cone_mask.append(halo_map_storage[i]['mask'])
-        if map_file is not None:
-            halo_map.extend(halo_map_storage[i]['map'])
-    del halo_map_storage
-
-    # Write out cube of masks from each slice.
-    if cube_file is not None:
-        _write_halo_mask(cube_file, np.array(light_cone_mask))
-
-    # Write out a text list of all halos in the image.
-    if map_file is not None:
-        _write_halo_map(map_file, halo_map)
-
-    # Write out final mask.
-    if mask_file is not None:
-        # Final mask is simply the product of the mask from each slice.
-        final_mask = np.ones(shape=(pixels, pixels))
-        for mask in light_cone_mask:
-            final_mask *= mask
-        _write_halo_mask(mask_file, final_mask)
-
-    return light_cone_mask
-
- at parallel_root_only
-def _write_halo_mask(filename, halo_mask):
-    r"""Write out an hdf5 file with the halo mask that
-    can be applied to an image.
-    """
-
-    mylog.info("Saving halo mask to %s." % filename)
-    output = h5py.File(filename, 'a')
-    if 'HaloMask' in output.keys():
-        del output['HaloMask']
-    output.create_dataset('HaloMask', data=np.array(halo_mask))
-    output.close()
-
- at parallel_root_only
-def _write_halo_map(filename, halo_map):
-    "Write a text list of halos in a light cone image."
-
-    mylog.info("Saving halo map to %s." % filename)
-    f = open(filename, 'w')
-    f.write("#z       x         y        r_image   r_mpc     m_Msun\n")
-    for halo in halo_map:
-        f.write("%7.4f %9.6f %9.6f %9.3e %9.3e %9.3e\n" % \
-                    (halo['redshift'], halo['x'], halo['y'],
-                     halo['radius_image'], halo['radius_mpc'],
-                     halo['mass']))
-    f.close()
-
-def _get_halo_list(dataset, halo_profiler_kwargs=None,
-                   halo_profiler_actions=None, halo_list='all'):
-    "Load a list of halos for the dataset."
-
-    if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
-    if halo_profiler_actions is None: halo_profiler_actions = []
-
-    hp = HaloProfiler(dataset, **halo_profiler_kwargs)
-    for action in halo_profiler_actions:
-        if not action.has_key('args'): action['args'] = ()
-        if not action.has_key('kwargs'): action['kwargs'] = {}
-        action['function'](hp, *action['args'], **action['kwargs'])
-
-    if halo_list == 'all':
-        return_list = copy.deepcopy(hp.all_halos)
-    elif halo_list == 'filtered':
-        return_list = copy.deepcopy(hp.filtered_halos)
-    else:
-        mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-        return_list = None
-
-    del hp
-    return return_list
-
-def _make_slice_mask(slice, halo_list, pixels, virial_overdensity):
-    "Make halo mask for one slice in light cone solution."
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Make boolean mask and cut out halos.
-    dx = slice['box_width_fraction'] / pixels
-    x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
-
-    # Cut out any pixel that has any part at all in the circle.
-    for q in range(len(all_halo_radius)):
-        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
-                              np.array(range(pixels))) != 0
-        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
-                              np.array(range(pixels))) != 0
-
-        xDistance = (np.abs(x - all_halo_x[q]) -
-                     (0.5 * dx)) * dif_xIndex
-        yDistance = (np.abs(x - all_halo_y[q]) -
-                     (0.5 * dx)) * dif_yIndex
-
-        distance = np.array([np.sqrt(w**2 + xDistance**2)
-                             for w in yDistance])
-        haloMask *= (distance >= all_halo_radius[q])
-
-    return haloMask
-
-def _make_slice_halo_map(slice, halo_list, virial_overdensity):
-    "Make list of halos for one slice in light cone solution."
-
-    # Get units to convert virial radii back to physical units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Construct list of halos
-    halo_map = []
-
-    for q in range(len(all_halo_x)):
-        # Give radius in both physical units and
-        # units of the image (0 to 1).
-        radius_mpc = all_halo_radius[q] * Mpc_units
-        radius_image = all_halo_radius[q] / slice['box_width_fraction']
-
-        halo_map.append({'x': all_halo_x[q] / slice['box_width_fraction'],
-                         'y': all_halo_y[q] / slice['box_width_fraction'],
-                         'redshift': slice['redshift'],
-                         'radius_mpc': radius_mpc,
-                         'radius_image': radius_image,
-                         'mass': all_halo_mass[q]})
-
-    return halo_map
-
-def _make_slice_halo_list(slice, halo_list, virial_overdensity):
-    "Make shifted, tiled list of halos for halo mask and halo map."
-
-    # Make numpy arrays for halo centers and virial radii.
-    halo_x = []
-    halo_y = []
-    halo_depth = []
-    halo_radius = []
-    halo_mass = []
-
-    # Get units to convert virial radii to code units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    for halo in halo_list:
-        if halo is not None:
-            center = copy.deepcopy(halo['center'])
-            halo_depth.append(center.pop(slice['projection_axis']))
-            halo_x.append(center[0])
-            halo_y.append(center[1])
-            halo_radius.append(halo['RadiusMpc_%d' % virial_overdensity] /
-                               Mpc_units)
-            halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
-
-    halo_x = np.array(halo_x)
-    halo_y = np.array(halo_y)
-    halo_depth = np.array(halo_depth)
-    halo_radius = np.array(halo_radius)
-    halo_mass = np.array(halo_mass)
-
-    # Adjust halo centers along line of sight.
-    depth_center = slice['projection_center'][slice['projection_axis']]
-    depth_left = depth_center - 0.5 * slice['box_depth_fraction']
-    depth_right = depth_center + 0.5 * slice['box_depth_fraction']
-
-    # Make boolean mask to pick out centers in region along line of sight.
-    # Halos near edges may wrap around to other side.
-    add_left = (halo_depth + halo_radius) > 1 # should be box width
-    add_right = (halo_depth - halo_radius) < 0
-
-    halo_depth = np.concatenate([halo_depth,
-                                 (halo_depth[add_left]-1),
-                                 (halo_depth[add_right]+1)])
-    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = np.concatenate([halo_radius,
-                                  halo_radius[add_left],
-                                  halo_radius[add_right]])
-    halo_mass = np.concatenate([halo_mass,
-                                halo_mass[add_left],
-                                halo_mass[add_right]])
-
-    del add_left, add_right
-
-    # Cut out the halos outside the region of interest.
-    if (slice['box_depth_fraction'] < 1):
-        if (depth_left < 0):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right)) | \
-                ((halo_depth + halo_radius >= depth_left + 1) &
-                 (halo_depth - halo_radius <= 1))
-        elif (depth_right > 1):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right - 1)) | \
-                ((halo_depth + halo_radius >= depth_left) &
-                 (halo_depth - halo_radius <= 1))
-        else:
-            mask = (halo_depth + halo_radius >= depth_left) & \
-              (halo_depth - halo_radius <= depth_right)
-
-        halo_x = halo_x[mask]
-        halo_y = halo_y[mask]
-        halo_radius = halo_radius[mask]
-        halo_mass = halo_mass[mask]
-        del mask
-    del halo_depth
-
-    all_halo_x = np.array([])
-    all_halo_y = np.array([])
-    all_halo_radius = np.array([])
-    all_halo_mass = np.array([])
-
-    # Tile halos if the box width fraction is greater than one.
-    # Copy original into offset positions to make tiles.
-    for x in range(int(np.ceil(slice['box_width_fraction']))):
-        for y in range(int(np.ceil(slice['box_width_fraction']))):
-            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
-
-    del halo_x, halo_y, halo_radius, halo_mass
-
-    # Shift centers laterally.
-    offset = copy.deepcopy(slice['projection_center'])
-    del offset[slice['projection_axis']]
-
-    # Shift x and y positions.
-    all_halo_x -= offset[0]
-    all_halo_y -= offset[1]
-
-    # Wrap off-edge centers back around to
-    # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
-
-    # After shifting, some centers have fractional coverage
-    # on both sides of the box.
-    # Find those centers and make copies to be placed on the other side.
-
-    # Centers hanging off the right edge.
-    add_x_right = all_halo_x + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
-    add_x_halo_y = all_halo_y[add_x_right]
-    add_x_halo_radius = all_halo_radius[add_x_right]
-    add_x_halo_mass = all_halo_mass[add_x_right]
-    del add_x_right
-
-    # Centers hanging off the left edge.
-    add_x_left = all_halo_x - all_halo_radius < 0
-    add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
-    add2_x_halo_y = all_halo_y[add_x_left]
-    add2_x_halo_radius = all_halo_radius[add_x_left]
-    add2_x_halo_mass = all_halo_mass[add_x_left]
-    del add_x_left
-
-    # Centers hanging off the top edge.
-    add_y_right = all_halo_y + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_y_halo_x = all_halo_x[add_y_right]
-    add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
-    add_y_halo_radius = all_halo_radius[add_y_right]
-    add_y_halo_mass = all_halo_mass[add_y_right]
-    del add_y_right
-
-    # Centers hanging off the bottom edge.
-    add_y_left = all_halo_y - all_halo_radius < 0
-    add2_y_halo_x = all_halo_x[add_y_left]
-    add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
-    add2_y_halo_radius = all_halo_radius[add_y_left]
-    add2_y_halo_mass = all_halo_mass[add_y_left]
-    del add_y_left
-
-    # Add the hanging centers back to the projection data.
-    all_halo_x = np.concatenate([all_halo_x,
-                                 add_x_halo_x, add2_x_halo_x,
-                                 add_y_halo_x, add2_y_halo_x])
-    all_halo_y = np.concatenate([all_halo_y,
-                                 add_x_halo_y, add2_x_halo_y,
-                                 add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = np.concatenate([all_halo_radius,
-                                      add_x_halo_radius,
-                                      add2_x_halo_radius,
-                                      add_y_halo_radius,
-                                      add2_y_halo_radius])
-    all_halo_mass = np.concatenate([all_halo_mass,
-                                    add_x_halo_mass,
-                                    add2_x_halo_mass,
-                                    add_y_halo_mass,
-                                    add2_y_halo_mass])
-
-    del add_x_halo_x, add_x_halo_y, add_x_halo_radius
-    del add2_x_halo_x, add2_x_halo_y, add2_x_halo_radius
-    del add_y_halo_x, add_y_halo_y, add_y_halo_radius
-    del add2_y_halo_x, add2_y_halo_y, add2_y_halo_radius
-
-    # Cut edges to proper width.
-    cut_mask = (all_halo_x - all_halo_radius <
-                slice['box_width_fraction']) & \
-        (all_halo_y - all_halo_radius <
-         slice['box_width_fraction'])
-    all_halo_x = all_halo_x[cut_mask]
-    all_halo_y = all_halo_y[cut_mask]
-    all_halo_radius = all_halo_radius[cut_mask]
-    all_halo_mass = all_halo_mass[cut_mask]
-    del cut_mask
-
-    return (all_halo_x, all_halo_y,
-            all_halo_radius, all_halo_mass)
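
For readers skimming the deleted file above: the periodic wrapping that
_make_slice_halo_list implemented reduces to a small numpy idiom.  A
standalone sketch with invented values (not yt API):

  import numpy as np

  # Hypothetical shifted halo centers in units of the box width; the
  # tiled domain spans [0, ceil(box_width_fraction)).
  box_width_fraction = 1.5
  width = np.ceil(box_width_fraction)
  halo_x = np.array([-0.2, 0.3, 1.4])
  # Periodic wrap: centers shifted below zero re-enter from the right edge.
  halo_x[halo_x < 0] += width
  print(halo_x)  # [ 1.8  0.3  1.4]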

diff -r 63469f33d0af17f4cb9512e698802847ebc41221 -r ee9744a977f7be05841ccf8b27c896a9c58ec21a yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -36,8 +36,6 @@
     YTArray
 from .common_n_volume import \
     common_volume
-from .halo_mask import \
-    _light_cone_halo_mask
 from .light_cone_projection import \
     _light_cone_projection
 
@@ -116,8 +114,6 @@
         self.output_dir = output_dir
         self.output_prefix = output_prefix
 
-        self.halo_mask = []
-
         # Create output directory.
         if not os.path.exists(self.output_dir):
             only_on_root(os.mkdir, self.output_dir)
@@ -157,9 +153,6 @@
                        "minimal light cone.")
             self.minimum_coherent_box_fraction = 0
 
-        # Get rid of old halo mask, if one was there.
-        self.halo_mask = []
-
         # Calculate projection sizes, and get
         # random projection axes and centers.
         seed = int(seed)
@@ -232,76 +225,8 @@
         if filename is not None:
             self._save_light_cone_solution(filename=filename)
 
-    def get_halo_mask(self, mask_file=None,
-                      cube_file=None, map_file=None,
-                      virial_overdensity=200,
-                      halo_profiler_parameters=None,
-                      njobs=1, dynamic=False):
-        r"""Gets a halo mask from a file or makes a new one.
-
-        Parameters
-        ----------
-        mask_file : string
-            An hdf5 file to output the halo mask.
-            Default: None
-        cube_file : string
-            An hdf5 file to output a halo mask for each slice
-            of the light cone.
-            Default: None
-        map_file : string
-            A text file to which to output the halo map (locations
-            in the images of the halos.
-            Default: None
-        virial_overdensity : float
-            The overdensity at which the virial radius is calculated
-            and used as the radius for the halo mask.
-            Default: 200
-        halo_profiler_parameters: dict
-            A dictionary of parameters to be passed to the HaloProfiler
-            for each slice of the light cone.
-            Default: None
-        njobs : int
-            The number of parallel jobs over which the slices for the
-            halo mask will be split.  Choose -1 for one processor per
-            individual slice and 1 to have all processors work together
-            on each projection.
-            Default: 1
-        dynamic : bool
-            If True, use dynamic load balancing to create the projections.
-            Default: False.
-
-        """
-
-        if halo_profiler_parameters is None:
-            halo_profiler_parameters = {}
-
-        # Check if file already exists.
-        if mask_file is not None and os.path.exists(mask_file):
-            mylog.info("Reading halo mask from %s." % mask_file)
-            in_file = h5py.File(mask_file, "r")
-            self.halo_mask = in_file["HaloMask"].value
-            in_file.close()
-
-        # Otherwise, make a halo mask.
-        else:
-            halo_mask_cube = _light_cone_halo_mask(self, mask_file=mask_file,
-                                                   cube_file=cube_file,
-                                                   map_file=map_file,
-                                                   virial_overdensity=\
-                                                   virial_overdensity,
-                                                   halo_profiler_parameters=\
-                                                   halo_profiler_parameters,
-                                                   njobs=njobs,
-                                                   dynamic=dynamic)
-            # Collapse cube into final mask.
-            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
-                                     dtype=bool)
-            for mask in halo_mask_cube:
-                self.halo_mask *= mask
-            del halo_mask_cube
-
     def project_light_cone(self, field_of_view, image_resolution, field,
-                           weight_field=None, apply_halo_mask=False,
+                           weight_field=None,
                            save_stack=True, save_final_image=True,
                            save_slice_images=False,
                            cmap_name="algae", photon_field=False,
@@ -320,10 +245,6 @@
             the weight field of the projection.  This has the same meaning as
             in standard projections.
             Default: None.
-        apply_halo_mask : bool
-            if True, a boolean mask is applied to the light cone projection.  See
-            below for a description of halo masks.
-            Default: False.
         save_stack : bool
             if True, the light cone data including each individual
             slice is written to an hdf5 file.
@@ -451,15 +372,6 @@
 
         filename = os.path.join(self.output_dir, self.output_prefix)
 
-        # Apply halo mask.
-        if apply_halo_mask:
-            if len(self.halo_mask) > 0:
-                mylog.info("Applying halo mask.")
-                light_cone_projection *= self.halo_mask
-            else:
-                mylog.error("No halo mask loaded, call get_halo_mask.")
-
-
         # Write image.
         if save_final_image:
             only_on_root(write_image, np.log10(light_cone_projection),
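
With the halo mask machinery removed, a projection is driven entirely by
the remaining keywords.  A minimal sketch, assuming the enzo_tiny_cosmology
sample dataset and the import path used in the updated recipe below:

  from yt.analysis_modules.cosmological_observation.api import LightCone

  lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo", 0.0, 0.1)
  lc.calculate_light_cone_solution(seed=123456789)
  # apply_halo_mask and get_halo_mask no longer exist after this changeset.
  lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), "density",
                        weight_field=None, save_stack=True,
                        save_final_image=True)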


https://bitbucket.org/yt_analysis/yt/commits/74813a72ea91/
Changeset:   74813a72ea91
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 18:44:34
Summary:     Fixing up save_solution function and updating recipe.
Affected #:  2 files

diff -r ee9744a977f7be05841ccf8b27c896a9c58ec21a -r 74813a72ea914548267cb8cba75d86b17714d802 doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,5 +1,5 @@
 import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import \
+from yt.analysis_modules.cosmological_observation.api import \
      LightCone
 
 # Create a LightCone object extending from z = 0 to z = 0.1.
@@ -13,7 +13,7 @@
                time_data=False)
 
 # Calculate a randomization of the solution.
-lc.calculate_light_cone_solution(seed=123456789)
+lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt")
 
 # Choose the field to be projected.
 field = 'szy'
@@ -23,6 +23,7 @@
 # Set njobs to -1 to have one core work on each projection
 # in parallel.
 lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
+                      weight_field=None,
                       save_stack=True,
                       save_final_image=True,
                       save_slice_images=True,

diff -r ee9744a977f7be05841ccf8b27c896a9c58ec21a -r 74813a72ea914548267cb8cba75d86b17714d802 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import copy
 import h5py
 import numpy as np
 import os
@@ -34,8 +33,6 @@
     write_image
 from yt.units.yt_array import \
     YTArray
-from .common_n_volume import \
-    common_volume
 from .light_cone_projection import \
     _light_cone_projection
 
@@ -177,6 +174,15 @@
                         self.light_cone_solution[q]["redshift"]) / \
                         self.simulation.box_size).in_units("")
 
+            # Calculate fraction of box required for width corresponding to
+            # requested image size.
+            proper_box_size = self.simulation.box_size / \
+              (1.0 + self.light_cone_solution[q]["redshift"])
+            self.light_cone_solution[q]["box_width_per_angle"] = \
+              (self.cosmology.angular_diameter_distance(self.observer_redshift,
+               self.light_cone_solution[q]["redshift"]) /
+               proper_box_size).in_units("1 / degree")
+
             # Simple error check to make sure more than 100% of box depth
             # is never required.
             if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
@@ -226,10 +232,10 @@
             self._save_light_cone_solution(filename=filename)
 
     def project_light_cone(self, field_of_view, image_resolution, field,
-                           weight_field=None,
+                           weight_field=None, photon_field=False,
                            save_stack=True, save_final_image=True,
                            save_slice_images=False,
-                           cmap_name="algae", photon_field=False,
+                           cmap_name="algae",
                            njobs=1, dynamic=False):
         r"""Create projections for light cone, then add them together.
 
@@ -245,6 +251,11 @@
             the weight field of the projection.  This has the same meaning as
             in standard projections.
             Default: None.
+        photon_field : bool
+            if True, the projection data for each slice is decremented by 4 Pi
+            R^2, where R is the luminosity distance between the observer and
+            the slice redshift.
+            Default: False.
         save_stack : bool
             if True, the light cone data including each individual
             slice is written to an hdf5 file.
@@ -258,11 +269,6 @@
         cmap_name : string
             color map for images.
             Default: "algae".
-        photon_field : bool
-            if True, the projection data for each slice is decremented by 4 Pi
-            R^2, where R is the luminosity distance between the observer and
-            the slice redshift.
-            Default: False.
         njobs : int
             The number of parallel jobs over which the light cone projection
             will be split.  Choose -1 for one processor per individual
@@ -309,10 +315,8 @@
             # requested image size.
             proper_box_size = self.simulation.box_size / \
               (1.0 + output["redshift"])
-            output["box_width_fraction"] = \
-              (self.cosmology.angular_diameter_distance(self.observer_redshift,
-               output["redshift"]) * field_of_view /
-               proper_box_size).in_units("")
+            output["box_width_fraction"] = (output["box_width_per_angle"] *
+                                            field_of_view).in_units("")
             
             frb = _light_cone_projection(output, field, pixels,
                                          weight_field=weight_field)
@@ -392,13 +396,14 @@
         mylog.info("Saving light cone solution to %s." % filename)
 
         f = open(filename, "w")
-        f.write("parameter_filename = %s\n" % self.parameter_filename)
+        f.write("# parameter_filename = %s\n" % self.parameter_filename)
         f.write("\n")
+        f.write("# Slice    Dataset    Redshift    depth/box    " + \
+                "width/degree    axis    center\n")
         for q, output in enumerate(self.light_cone_solution):
-            f.write(("Proj %04d, %s, z = %f, depth/box = %f, " +
-                    "width/box = %f, axis = %d, center = %f, %f, %f\n") %
+            f.write(("%04d %s %f %f %f %d %f %f %f\n") %
                     (q, output["filename"], output["redshift"],
-                     output["box_depth_fraction"], output["box_width_fraction"],
+                     output["box_depth_fraction"], output["box_width_per_angle"],
                      output["projection_axis"], output["projection_center"][0],
                      output["projection_center"][1], output["projection_center"][2]))
         f.close()
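
The new box_width_per_angle entry turns the per-slice width calculation
into a pure unit conversion.  A sketch of the arithmetic with an assumed
value:

  import yt

  # Hypothetical value: box widths subtended per degree of sky at a slice.
  box_width_per_angle = yt.YTQuantity(0.05, "1/degree")
  field_of_view = yt.YTQuantity(600.0, "arcmin")  # 10 degrees
  # Mirrors the in_units("") step in the diff: the result is dimensionless.
  box_width_fraction = (box_width_per_angle * field_of_view).in_units("")
  print(box_width_fraction)  # 0.5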


https://bitbucket.org/yt_analysis/yt/commits/392648265aa8/
Changeset:   392648265aa8
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 18:45:52
Summary:     Removing deleted recipes from listing.
Affected #:  1 file

diff -r 74813a72ea914548267cb8cba75d86b17714d802 -r 392648265aa8fa37a6b3ac9962a9418774f8a0d4 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -37,22 +37,8 @@
 
 .. yt_cookbook:: light_cone_projection.py
 
-Light Cone with Halo Mask
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script combines the light cone generator with the halo profiler to 
-make a light cone projection with all of the halos cut out of the image.
-
-.. yt_cookbook:: light_cone_with_halo_mask.py 
-
-Making Unique Light Cones
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a series of light cone projections
-that only have a maximum amount of volume in common.
-
-.. yt_cookbook:: unique_light_cone_projections.py 
-
-Light Rays
-~~~~~~~~~~
+Light Ray
+~~~~~~~~~
 This script demonstrates how to make a synthetic quasar sight line that 
 extends over multiple datasets and can be used to generate a synthetic 
 absorption spectrum.


https://bitbucket.org/yt_analysis/yt/commits/89113012c7d3/
Changeset:   89113012c7d3
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-09 22:24:06
Summary:     Updating light cone docs.
Affected #:  2 files

diff -r 392648265aa8fa37a6b3ac9962a9418774f8a0d4 -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -2,15 +2,15 @@
 
 Light Cone Generator
 ====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Light cones are projections made by stacking multiple datasets together to 
-continuously span a given redshift interval.  The width of individual 
-projection slices is adjusted such that each slice has the same angular size.  
-Each projection slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  Since deeper 
-images sample earlier epochs of the simulation, light cones represent the 
-closest thing to synthetic imaging observations.
+Light cones are created by stacking multiple datasets together to 
+continuously span a given redshift interval.  To make a projection of a 
+field through a light cone, the width of individual slices is adjusted 
+such that each slice has the same angular size.  
+Each slice is randomly shifted and projected along a random axis to 
+ensure that the same structures are not sampled multiple times.  A 
+recipe for creating a simple light cone projection can be found in 
+the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
@@ -23,46 +23,41 @@
 Configuring the Light Cone Generator
 ------------------------------------
 
-A recipe for creating a simple light cone projection can be found in the 
-cookbook.  The required arguments to instantiate a ``LightCone`` objects are 
+The required arguments to instantiate a ``LightCone`` object are 
 the path to the simulation parameter file, the simulation type, the nearest 
 redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightCone
+  from yt.analysis_modules.cosmological_observation.api import \
+       LightCone
 
   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1)
 
 The additional keyword arguments are:
 
- * **field_of_view_in_arcminutes** (*float*): The field of view of the image 
-   in units of arcminutes.  Default: 600.0.
-
- * **image_resolution_in_arcseconds** (*float*): The size of each image pixel 
-   in units of arcseconds.  Default: 60.0.
-
- * **use_minimum_datasets** (*bool*):  If True, the minimum number of datasets 
-   is used to connect the initial and final redshift.  If false, the light 
-   cone solution will contain as many entries as possible within the redshift 
-   interval.  Default: True.
+ * **use_minimum_datasets** (*bool*):  If True, the minimum number of 
+   datasets is used to connect the initial and final redshift.  If False, 
+   the light cone solution will contain as many entries as possible within 
+   the redshift interval.  Default: True.
 
  * **deltaz_min** (*float*): Specifies the minimum Delta-z between 
    consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This 
-   was invented to allow light cones with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light cones with 
+   thin slices to sample coherent large scale structure, but in practice does 
+   not work so well.  Try setting this parameter to 1 and see what happens.  
+   Default: 0.0.
 
  * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
-   gathering datasets for time series.  Default: True.
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
    pf.parameters.  Default: None.
@@ -76,10 +71,10 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets and the width, depth, 
-center, and axis of the projection to be made for that slice.  The 
-:meth:`LightCone.calculate_light_cone_solution` function is used to 
-calculate the random shifting and projection axis:
+A light cone solution consists of a list of datasets spanning a redshift 
+interval with a random orientation for each dataset.  A new solution 
+is calculated with the :meth:`LightCone.calculate_light_cone_solution` 
+function:
 
 .. code-block:: python
 
@@ -87,70 +82,39 @@
 
 The keyword arguments are:
 
- * **seed** (*int*): the seed for the random number generator.  Any light cone 
-   solution can be reproduced by giving the same random seed.  Default: None 
-   (each solution will be distinct).
+ * **seed** (*int*): the seed for the random number generator.  Any light 
+   cone solution can be reproduced by giving the same random seed.  
+   Default: None.
 
  * **filename** (*str*): if given, a text file detailing the solution will be 
    written out.  Default: None.
 
-If a new solution for the same LightCone object is desired, the 
-:meth:`rerandomize_light_cone_solution` method should be called in place of 
-:meth:`calculate_light_cone_solution`:
-
-.. code-block:: python
-
-  new_seed = 987654321
-  lc.rerandomize_light_cone_solution(new_seed, Recycle=True, 
-                                     filename='new_lightcone.dat')
-
-Additional keyword arguments are:
-
- * **recycle** (*bool*): if True, the new solution will have the same shift in 
-   the line of sight as the original solution.  Since the projections of each 
-   slice are serialized and stored for the entire width of the box (even if 
-   the width used is less than the total box), the projection data can be 
-   deserialized instead of being remade from scratch.  This can greatly speed 
-   up the creation of a large number of light cone projections.  Default: True.
-
- * **filename** (*str*): if given, a text file detailing the solution will be 
-   written out.  Default: None.
-
-If :meth:`rerandomize_light_cone_solution` is used, the LightCone object will 
-keep a copy of the original solution that can be returned to at any time by 
-calling :meth:`restore_master_solution`:
-
-.. code-block:: python
-
-  lc.restore_master_solution()
-
-.. note:: All light cone solutions made with the above method will still use 
-   the same list of datasets.  Only the shifting and projection axis will be 
-   different.
-
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution set, projections can be made of any available 
-field:
+With the light cone solution in place, projections with a given field of 
+view and resolution can be made of any available field:
 
 .. code-block:: python
 
   field = 'density'
-  lc.project_light_cone(field , weight_field=None, 
+  field_of_view = (600.0, "arcmin")
+  resolution = (60.0, "arcsec")
+  lc.project_light_cone(field_of_view, resolution,
+                        field, weight_field=None, 
                         save_stack=True, 
                         save_slice_images=True)
 
+The field of view and resolution can be specified either as a tuple of 
+value and unit string or as a unitful ``YTQuantity``.  
 Additional keyword arguments:
 
- * **weight_field** (*str*): the weight field of the projection.  This has the 
-   same meaning as in standard projections.  Default: None.
+ * **weight_field** (*str*): the weight field of the projection.  This has 
+   the same meaning as in standard projections.  Default: None.
 
- * **apply_halo_mask** (*bool*): if True, a boolean mask is apply to the light 
-   cone projection.  See below for a description of halo masks.  Default: False.
-
- * **node** (*str*): a prefix to be prepended to the node name under which the 
-   projection data is serialized.  Default: None.
+ * **photon_field** (*bool*): if True, the projection data for each slice is 
+   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+   distance between the observer and the slice redshift.  Default: False.
 
  * **save_stack** (*bool*): if True, the unflattened light cone data including 
    each individual slice is written to an hdf5 file.  Default: True.
@@ -161,13 +125,7 @@
  * **save_slice_images** (*bool*): save images for each individual projection 
    slice.  Default: False.
 
- * **flatten_stack** (*bool*): if True, the light cone stack is continually 
-   flattened each time a slice is added in order to save memory.  This is 
-   generally not necessary.  Default: False.
-
- * **photon_field** (*bool*): if True, the projection data for each slice is 
-   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
-   distance between the observer and the slice redshift.  Default: False.
+ * **cmap_name** (*string*): color map for images.  Default: "algae".
 
  * **njobs** (*int*): The number of parallel jobs over which the light cone 
    projection will be split.  Choose -1 for one processor per individual
@@ -177,34 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Sampling Unique Light Cone Volumes
-----------------------------------
-
-When making a large number of light cones, particularly for statistical 
-analysis, it is important to have a handle on the amount of sampled volume in 
-common from one projection to another.  Any statistics may be untrustworthy if a 
-set of light cones have too much volume in common, even if they may all be 
-entirely different in appearance.  LightCone objects have the ability to 
-calculate the volume in common between two solutions with the same dataset 
-list.  The :meth:`find_unique_solutions` and 
-:meth:`project_unique_light_cones` functions can be used to create a set of 
-light cone solutions that have some maximum volume in common and create light 
-cone projections for those solutions.  If specified, the code will attempt to 
-use recycled solutions that can use the same serialized projection objects 
-that have already been created.  This can greatly increase the speed of making 
-multiple light cone projections.  See the cookbook for an example of doing this.
-
-Making Light Cones with a Halo Mask
------------------------------------
-
-The situation may arise where it is necessary or desirable to know the 
-location of halos within the light cone volume, and specifically their 
-location in the final image.  This can be useful for developing algorithms to 
-find galaxies or clusters in image data.  The light cone generator does this 
-by running the HaloProfiler (see :ref:`halo_profiling`) on each of the 
-datasets used in the light cone and shifting them accordingly with the light 
-cone solution.  The ability also exists to create a boolean mask with the 
-dimensions of the final light cone image that can be used to mask out the 
-halos in the image.  It is left as an exercise to the reader to find a use for 
-this functionality.  This process is somewhat complicated, but not terribly.  
-See the recipe in the cookbook for an example of this functionality.
+.. note:: As of verion 3.0, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
\ No newline at end of file

diff -r 392648265aa8fa37a6b3ac9962a9418774f8a0d4 -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -29,6 +29,8 @@
 
 .. yt_cookbook:: halo_merger_tree.py
 
+.. _cookbook-light_cone:
+
 Light Cone Projection
 ~~~~~~~~~~~~~~~~~~~~~
 This script creates a light cone projection, a synthetic observation 
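
As the updated docs note, the field of view and resolution may be given
either as (value, unit) tuples or as unitful quantities.  A hedged
illustration of the equivalence (values arbitrary):

  import yt

  fov_tuple = (600.0, "arcmin")                  # tuple form
  fov_quantity = yt.YTQuantity(600.0, "arcmin")  # YTQuantity form
  resolution = (60.0, "arcsec")
  # Either form should be accepted, e.g.:
  # lc.project_light_cone(fov_quantity, resolution, "density")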


https://bitbucket.org/yt_analysis/yt/commits/589e898c22f7/
Changeset:   589e898c22f7
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-10 13:03:04
Summary:     Updating docs and recipes.
Affected #:  5 files

diff -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d -r 589e898c22f766dfff55337919a1a57bff2df956 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -135,4 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-.. note:: As of verion 3.0, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
\ No newline at end of file
+.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
\ No newline at end of file

diff -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d -r 589e898c22f766dfff55337919a1a57bff2df956 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -1,20 +1,21 @@
 .. _light-ray-generator:
 
 Light Ray Generator
-====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
+===================
 
 Light rays are similar to light cones (:ref:`light-cone-generator`) in how  
 they stack multiple datasets together to span a redshift interval.  Unlike 
-light cones, which which stack randomly oriented projections from each 
+light cones, which stack randomly oriented projections from each 
 dataset to create synthetic images, light rays use thin pencil beams to 
-simulate QSO sight lines.
+simulate QSO sight lines.  A sample script can be found in the cookbook 
+under :ref:`cookbook-light_ray`.
 
 .. image:: _images/lightray.png
 
-A ray segment records the information of all grid cells intersected by the ray 
-as well as the path length, dl, of the ray through the cell.  Column densities 
-can be calculated by multiplying physical densities by the path length.
+A ray segment records the information of all grid cells intersected by the 
+ray as well as the path length, dl, of the ray through the cell.  Column 
+densities can be calculated by multiplying physical densities by the path 
+length.
 
 Configuring the Light Ray Generator
 -----------------------------------
@@ -36,22 +37,22 @@
    ray solution will contain as many entries as possible within the redshift
    interval.  Default: True.
 
- * **deltaz_min** (*float*):  Specifies the minimum Delta-z between consecutive
-   datasets in the returned list.  Default: 0.0.
+ * **deltaz_min** (*float*):  Specifies the minimum Delta-z between 
+   consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This
-   was invented to allow light rays with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light rays with 
+   thin slices to sample coherent large scale structure, but in practice 
+   does not work so well.  Try setting this parameter to 1 and see what 
+   happens.  Default: 0.0.
 
- * **time_data** (*bool*): Whether or not to include time outputs when gathering
-   datasets for time series.  Default: True.
-
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
+ * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
 Making Light Ray Data
 ---------------------
@@ -74,7 +75,21 @@
 
  * **seed** (*int*): Seed for the random number generator.  Default: None.
 
- * **fields** (*list*): A list of fields for which to get data.  Default: None.
+ * **start_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the starting position of the 
+   ray.  Default: None.
+
+ * **end_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the ending position of the ray.
+   Default: None.
+
+ * **trajectory** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The (r, theta, phi) direction of the light ray.  
+   Use either **end_position** or **trajectory**, not both.  
+   Default: None.
+
+ * **fields** (*list*): A list of fields for which to get data.  
+   Default: None.
 
  * **solution_filename** (*string*): Path to a text file where the 
    trajectory of each subray is written out.  Default: None.
@@ -83,51 +98,17 @@
    Default: None.
 
  * **get_los_velocity** (*bool*): If True, the line of sight velocity is 
-   calculated for each point in the ray.  Default: False.
+   calculated for each point in the ray.  Default: True.
 
- * **get_nearest_halo** (*bool*): If True, the HaloProfiler will be used to 
-   calculate the distance and mass of the nearest halo for each point in the
-   ray.  This option requires additional information to be included.  See 
-   the cookbook for an example.  Default: False.
-
- * **nearest_halo_fields** (*list*): A list of fields to be calculated for the 
-   halos nearest to every pixel in the ray.  Default: None.
-
- * **halo_list_file** (*str*): Filename containing a list of halo properties to be used 
-   for getting the nearest halos to absorbers.  Default: None.
-
- * **halo_profiler_parameters** (*dict*): A dictionary of parameters to be 
-   passed to the HaloProfiler to create the appropriate data used to get 
-   properties for the nearest halos.  Default: None.
-
- * **njobs** (*int*): The number of parallel jobs over which the slices for the
-   halo mask will be split.  Choose -1 for one processor per individual slice 
-   and 1 to have all processors work together on each projection.  Default: 1
+ * **njobs** (*int*): The number of parallel jobs over which the slices for 
+   the halo mask will be split.  Choose -1 for one processor per individual 
+   slice and 1 to have all processors work together on each projection.  
+   Default: 1
 
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Getting The Nearest Galaxies
-----------------------------
-
-The light ray tool will use the HaloProfiler to calculate the distance and 
-mass of the nearest halo to that pixel.  In order to do this, a dictionary 
-called halo_profiler_parameters is used to pass instructions to the 
-HaloProfiler.  This dictionary has three additional keywords:
-
- * **halo_profiler_kwargs** (*dict*): A dictionary of standard HaloProfiler 
-   keyword arguments and values to be given to the HaloProfiler.
-
- * **halo_profiler_actions** (*list*): A list of actions to be performed by 
-   the HaloProfiler.  Each item in the list should be a dictionary with the 
-   following entries: "function", "args", and "kwargs", for the function to 
-   be performed, the arguments supplied to that function, and the keyword 
-   arguments.
-
- * **halo_list** (*string*): 'all' to use the full halo list, or 'filtered' 
-   to use the filtered halo list created after calling make_profiles.
-
-See the recipe in the cookbook for an example.
+.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

diff -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d -r 589e898c22f766dfff55337919a1a57bff2df956 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -39,6 +39,8 @@
 
 .. yt_cookbook:: light_cone_projection.py
 
+.. _cookbook-light_ray:
+
 Light Ray
 ~~~~~~~~~
 This script demonstrates how to make a synthetic quasar sight line that 

diff -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d -r 589e898c22f766dfff55337919a1a57bff2df956 doc/source/cookbook/light_ray.py
--- a/doc/source/cookbook/light_ray.py
+++ b/doc/source/cookbook/light_ray.py
@@ -1,7 +1,7 @@
 import os
 import sys
 import yt
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
+from yt.analysis_modules.cosmological_observation.api import \
      LightRay
 
 # Create a directory for the light rays

diff -r 89113012c7d33a9e6738ed49c4bfcf7f8e47ab5d -r 589e898c22f766dfff55337919a1a57bff2df956 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -236,7 +236,7 @@
         trajectory : list of floats
             Used only if creating a light ray from a single dataset.
             The (r, theta, phi) direction of the light ray.  Use either 
-        end_position or trajectory, not both.
+            end_position or trajectory, not both.
             Default: None.
         fields : list
             A list of fields for which to get data.
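
The new single-dataset keywords sit alongside the existing ones.  A
minimal multi-dataset sketch following the updated docs (dataset and
field names are assumptions):

  from yt.analysis_modules.cosmological_observation.api import LightRay

  lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo", 0.0, 0.1)
  lr.make_light_ray(seed=123456789,
                    fields=["temperature", "density"],
                    get_los_velocity=True,
                    solution_filename="lightraysolution.txt",
                    data_filename="lightray.h5")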


https://bitbucket.org/yt_analysis/yt/commits/5a21ea6fa471/
Changeset:   5a21ea6fa471
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-11 17:38:07
Summary:     Fixing cosmology calculator to allow for both angular diameter distance and angular scale and updating light cone.
Affected #:  2 files

diff -r 589e898c22f766dfff55337919a1a57bff2df956 -r 5a21ea6fa4715c7747d9c624351ec3ef9ce780f9 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -179,7 +179,7 @@
             proper_box_size = self.simulation.box_size / \
               (1.0 + self.light_cone_solution[q]["redshift"])
             self.light_cone_solution[q]["box_width_per_angle"] = \
-              (self.cosmology.angular_diameter_distance(self.observer_redshift,
+              (self.cosmology.angular_scale(self.observer_redshift,
                self.light_cone_solution[q]["redshift"]) /
                proper_box_size).in_units("1 / degree")
 

diff -r 589e898c22f766dfff55337919a1a57bff2df956 -r 5a21ea6fa4715c7747d9c624351ec3ef9ce780f9 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -190,6 +190,29 @@
 
     def angular_diameter_distance(self, z_i, z_f):
         r"""
+        Following Hogg (1999), the angular diameter distance is 'the ratio of 
+        an object's physical transverse size to its angular size in radians.'
+
+        Parameters
+        ----------
+        z_i : float
+            The redshift of the observer.
+        z_f : float
+            The redshift of the observed object.
+
+        Examples
+        --------
+
+        >>> co = Cosmology()
+        >>> print co.angular_diameter_distance(0., 1.).in_units("Mpc")
+        
+        """
+        
+        return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
+                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs()
+
+    def angular_scale(self, z_i, z_f):
+        r"""
         The proper transverse distance between two points at redshift z_f 
         observed at redshift z_i per unit of angular separation.
 
@@ -204,13 +227,12 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.angular_diameter_distance(0., 1.).in_units("Mpc/deg")
+        >>> print co.angular_scale(0., 1.).in_units("kpc / arcsec")
         
         """
-        
-        return (self.comoving_transverse_distance(0, z_f) / (1 + z_f) - 
-                self.comoving_transverse_distance(0, z_i) / (1 + z_i)).in_cgs() / \
-                self.quan(1., "radian")
+
+        return self.angular_diameter_distance(z_i, z_f) / \
+          self.quan(1, "radian")
 
     def luminosity_distance(self, z_i, z_f):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/ef7c988b774a/
Changeset:   ef7c988b774a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-14 19:58:22
Summary:     Merged in brittonsmith/yt/yt-3.0 (pull request #997)

LightRay and LightCone 3.0
Affected #:  21 files

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -2,15 +2,15 @@
 
 Light Cone Generator
 ====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Light cones are projections made by stacking multiple datasets together to 
-continuously span a given redshift interval.  The width of individual 
-projection slices is adjusted such that each slice has the same angular size.  
-Each projection slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  Since deeper 
-images sample earlier epochs of the simulation, light cones represent the 
-closest thing to synthetic imaging observations.
+Light cones are created by stacking multiple datasets together to 
+continuously span a given redshift interval.  To make a projection of a 
+field through a light cone, the width of individual slices is adjusted 
+such that each slice has the same angular size.  
+Each slice is randomly shifted and projected along a random axis to 
+ensure that the same structures are not sampled multiple times.  A 
+recipe for creating a simple light cone projection can be found in 
+the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
@@ -23,46 +23,41 @@
 Configuring the Light Cone Generator
 ------------------------------------
 
-A recipe for creating a simple light cone projection can be found in the 
-cookbook.  The required arguments to instantiate a ``LightCone`` objects are 
+The required arguments to instantiate a ``LightCone`` object are 
 the path to the simulation parameter file, the simulation type, the nearest 
 redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightCone
+  from yt.analysis_modules.cosmological_observation.api import \
+       LightCone
 
   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1)
 
 The additional keyword arguments are:
 
- * **field_of_view_in_arcminutes** (*float*): The field of view of the image 
-   in units of arcminutes.  Default: 600.0.
-
- * **image_resolution_in_arcseconds** (*float*): The size of each image pixel 
-   in units of arcseconds.  Default: 60.0.
-
- * **use_minimum_datasets** (*bool*):  If True, the minimum number of datasets 
-   is used to connect the initial and final redshift.  If false, the light 
-   cone solution will contain as many entries as possible within the redshift 
-   interval.  Default: True.
+ * **use_minimum_datasets** (*bool*):  If True, the minimum number of 
+   datasets is used to connect the initial and final redshift.  If False, 
+   the light cone solution will contain as many entries as possible within 
+   the redshift interval.  Default: True.
 
  * **deltaz_min** (*float*): Specifies the minimum Delta-z between 
    consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This 
-   was invented to allow light cones with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light cones with 
+   thin slices to sample coherent large scale structure, but in practice does 
+   not work so well.  Try setting this parameter to 1 and see what happens.  
+   Default: 0.0.
 
  * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
-   gathering datasets for time series.  Default: True.
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
    pf.parameters.  Default: None.
@@ -76,10 +71,10 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets and the width, depth, 
-center, and axis of the projection to be made for that slice.  The 
-:meth:`LightCone.calculate_light_cone_solution` function is used to 
-calculate the random shifting and projection axis:
+A light cone solution consists of a list of datasets spanning a redshift 
+interval with a random orientation for each dataset.  A new solution 
+is calculated with the :meth:`LightCone.calculate_light_cone_solution` 
+function:
 
 .. code-block:: python
 
@@ -87,70 +82,39 @@
 
 The keyword arguments are:
 
- * **seed** (*int*): the seed for the random number generator.  Any light cone 
-   solution can be reproduced by giving the same random seed.  Default: None 
-   (each solution will be distinct).
+ * **seed** (*int*): the seed for the random number generator.  Any light 
+   cone solution can be reproduced by giving the same random seed.  
+   Default: None.
 
  * **filename** (*str*): if given, a text file detailing the solution will be 
    written out.  Default: None.
 
-If a new solution for the same LightCone object is desired, the 
-:meth:`rerandomize_light_cone_solution` method should be called in place of 
-:meth:`calculate_light_cone_solution`:
-
-.. code-block:: python
-
-  new_seed = 987654321
-  lc.rerandomize_light_cone_solution(new_seed, Recycle=True, 
-                                     filename='new_lightcone.dat')
-
-Additional keyword arguments are:
-
- * **recycle** (*bool*): if True, the new solution will have the same shift in 
-   the line of sight as the original solution.  Since the projections of each 
-   slice are serialized and stored for the entire width of the box (even if 
-   the width used is less than the total box), the projection data can be 
-   deserialized instead of being remade from scratch.  This can greatly speed 
-   up the creation of a large number of light cone projections.  Default: True.
-
- * **filename** (*str*): if given, a text file detailing the solution will be 
-   written out.  Default: None.
-
-If :meth:`rerandomize_light_cone_solution` is used, the LightCone object will 
-keep a copy of the original solution that can be returned to at any time by 
-calling :meth:`restore_master_solution`:
-
-.. code-block:: python
-
-  lc.restore_master_solution()
-
-.. note:: All light cone solutions made with the above method will still use 
-   the same list of datasets.  Only the shifting and projection axis will be 
-   different.
-
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution set, projections can be made of any available 
-field:
+With the light cone solution in place, projections with a given field of 
+view and resolution can be made of any available field:
 
 .. code-block:: python
 
   field = 'density'
-  lc.project_light_cone(field , weight_field=None, 
+  field_of_view = (600.0, "arcmin")
+  resolution = (60.0, "arcsec")
+  lc.project_light_cone(field_of_view, resolution,
+                        field, weight_field=None, 
                         save_stack=True, 
                         save_slice_images=True)
 
+The field of view and resolution can be specified either as a tuple of 
+value and unit string or as a unitful ``YTQuantity``.  
 Additional keyword arguments:
 
- * **weight_field** (*str*): the weight field of the projection.  This has the 
-   same meaning as in standard projections.  Default: None.
+ * **weight_field** (*str*): the weight field of the projection.  This has 
+   the same meaning as in standard projections.  Default: None.
 
- * **apply_halo_mask** (*bool*): if True, a boolean mask is apply to the light 
-   cone projection.  See below for a description of halo masks.  Default: False.
-
- * **node** (*str*): a prefix to be prepended to the node name under which the 
-   projection data is serialized.  Default: None.
+ * **photon_field** (*bool*): if True, the projection data for each slice is 
+   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+   distance between the observer and the slice redshift.  Default: False.
 
  * **save_stack** (*bool*): if True, the unflattened light cone data including 
    each individual slice is written to an hdf5 file.  Default: True.
@@ -161,13 +125,7 @@
  * **save_slice_images** (*bool*): save images for each individual projection 
    slice.  Default: False.
 
- * **flatten_stack** (*bool*): if True, the light cone stack is continually 
-   flattened each time a slice is added in order to save memory.  This is 
-   generally not necessary.  Default: False.
-
- * **photon_field** (*bool*): if True, the projection data for each slice is 
-   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
-   distance between the observer and the slice redshift.  Default: False.
+ * **cmap_name** (*string*): color map for images.  Default: "algae".
 
  * **njobs** (*int*): The number of parallel jobs over which the light cone 
    projection will be split.  Choose -1 for one processor per individual
@@ -177,34 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Sampling Unique Light Cone Volumes
-----------------------------------
-
-When making a large number of light cones, particularly for statistical 
-analysis, it is important to have a handle on the amount of sampled volume in 
-common from one projection to another.  Any statistics may be untrustworthy 
-if a set of light cones has too much volume in common, even if they may all be 
-entirely different in appearance.  LightCone objects have the ability to 
-calculate the volume in common between two solutions with the same dataset 
-list.  The :meth:`find_unique_solutions` and 
-:meth:`project_unique_light_cones` functions can be used to create a set of 
-light cone solutions that have some maximum volume in common and create light 
-cone projections for those solutions.  If specified, the code will attempt to 
-use recycled solutions that can use the same serialized projection objects 
-that have already been created.  This can greatly increase the speed of making 
-multiple light cone projections.  See the cookbook for an example of doing this.
-
-Making Light Cones with a Halo Mask
------------------------------------
-
-The situation may arise where it is necessary or desirable to know the 
-location of halos within the light cone volume, and specifically their 
-location in the final image.  This can be useful for developing algorithms to 
-find galaxies or clusters in image data.  The light cone generator does this 
-by running the HaloProfiler (see :ref:`halo_profiling`) on each of the 
-datasets used in the light cone and shifting them accordingly with the light 
-cone solution.  The ability also exists to create a boolean mask with the 
-dimensions of the final light cone image that can be used to mask out the 
-halos in the image.  It is left as an exercise to the reader to find a use for 
-this functionality.  This process is somewhat complicated, but not terribly so.  
-See the recipe in the cookbook for an example of this functionality.
+.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone 
+   functionality no longer exist.  These are still available in 
+   :code:`yt-2.x`.  If you would like to use these features in 
+   :code:`yt-3.x`, help is needed to port them over.  Contact the 
+   yt-users mailing list if you are interested in doing this.
\ No newline at end of file

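As a quick illustration of the signature change documented above: the field
of view and resolution are now the first two arguments to
``project_light_cone``.  A minimal sketch showing both accepted forms
(``lc`` is the ``LightCone`` object from the docs above; the ``YTQuantity``
import path follows ``yt.units.yt_array``, used elsewhere in this commit):

.. code-block:: python

   from yt.units.yt_array import YTQuantity

   # Both forms are equivalent: a (value, unit) tuple or a unitful quantity.
   fov = (600.0, "arcmin")               # tuple of value and unit string
   res = YTQuantity(60.0, "arcsec")      # unitful YTQuantity

   lc.project_light_cone(fov, res, "density",
                         weight_field=None,
                         save_stack=True,
                         save_slice_images=True)
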
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -1,20 +1,21 @@
 .. _light-ray-generator:
 
 Light Ray Generator
-====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
+===================
 
 Light rays are similar to light cones (:ref:`light-cone-generator`) in how  
 they stack multiple datasets together to span a redshift interval.  Unlike 
-light cones, which which stack randomly oriented projections from each 
+light cones, which stack randomly oriented projections from each 
 dataset to create synthetic images, light rays use thin pencil beams to 
-simulate QSO sight lines.
+simulate QSO sight lines.  A sample script can be found in the cookbook 
+under :ref:`cookbook-light_ray`.
 
 .. image:: _images/lightray.png
 
-A ray segment records the information of all grid cells intersected by the ray 
-as well as the path length, dl, of the ray through the cell.  Column densities 
-can be calculated by multiplying physical densities by the path length.
+A ray segment records the information of all grid cells intersected by the 
+ray as well as the path length, dl, of the ray through the cell.  Column 
+densities can be calculated by multiplying physical densities by the path 
+length.
 
 Configuring the Light Ray Generator
 -----------------------------------
@@ -36,22 +37,22 @@
    ray solution will contain as many entries as possible within the redshift
    interval.  Default: True.
 
- * **deltaz_min** (*float*):  Specifies the minimum Delta-z between consecutive
-   datasets in the returned list.  Default: 0.0.
+ * **deltaz_min** (*float*):  Specifies the minimum Delta-z between 
+   consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This
-   was invented to allow light rays with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light rays with 
+   thin slices to sample coherent large scale structure, but in practice 
+   does not work so well.  Try setting this parameter to 1 and see what 
+   happens.  Default: 0.0.
 
- * **time_data** (*bool*): Whether or not to include time outputs when gathering
-   datasets for time series.  Default: True.
-
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
+ * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
 Making Light Ray Data
 ---------------------
@@ -74,7 +75,21 @@
 
  * **seed** (*int*): Seed for the random number generator.  Default: None.
 
- * **fields** (*list*): A list of fields for which to get data.  Default: None.
+ * **start_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the starting position of the 
+   ray.  Default: None.
+
+ * **end_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the ending position of the ray.
+   Default: None.
+
+ * **trajectory** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The (r, theta, phi) direction of the light ray.  
+   Use either **end_position** or **trajectory**, not both.  
+   Default: None.
+
+ * **fields** (*list*): A list of fields for which to get data.  
+   Default: None.
 
  * **solution_filename** (*string*): Path to a text file where the 
   trajectory of each subray is written out.  Default: None.
@@ -83,51 +98,17 @@
    Default: None.
 
  * **get_los_velocity** (*bool*): If True, the line of sight velocity is 
-   calculated for each point in the ray.  Default: False.
+   calculated for each point in the ray.  Default: True.
 
- * **get_nearest_halo** (*bool*): If True, the HaloProfiler will be used to 
-   calculate the distance and mass of the nearest halo for each point in the
-   ray.  This option requires additional information to be included.  See 
-   the cookbook for an example.  Default: False.
-
- * **nearest_halo_fields** (*list*): A list of fields to be calculated for the 
-   halos nearest to every pixel in the ray.  Default: None.
-
- * **halo_list_file** (*str*): Filename containing a list of halo properties to be used 
-   for getting the nearest halos to absorbers.  Default: None.
-
- * **halo_profiler_parameters** (*dict*): A dictionary of parameters to be 
-   passed to the HaloProfiler to create the appropriate data used to get 
-   properties for the nearest halos.  Default: None.
-
- * **njobs** (*int*): The number of parallel jobs over which the slices for the
-   halo mask will be split.  Choose -1 for one processor per individual slice 
-   and 1 to have all processors work together on each projection.  Default: 1
+ * **njobs** (*int*): The number of parallel jobs over which the slices for 
+   the halo mask will be split.  Choose -1 for one processor per individual 
+   slice and 1 to have all processors work together on each projection.  
+   Default: 1.
 
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Getting The Nearest Galaxies
-----------------------------
-
-The light ray tool will use the HaloProfiler to calculate the distance and 
-mass of the nearest halo to that pixel.  In order to do this, a dictionary 
-called halo_profiler_parameters is used to pass instructions to the 
-HaloProfiler.  This dictionary has three additional keywords:
-
- * **halo_profiler_kwargs** (*dict*): A dictionary of standard HaloProfiler 
-   keyword arguments and values to be given to the HaloProfiler.
-
- * **halo_profiler_actions** (*list*): A list of actions to be performed by 
-   the HaloProfiler.  Each item in the list should be a dictionary with the 
-   following entries: "function", "args", and "kwargs", for the function to 
-   be performed, the arguments supplied to that function, and the keyword 
-   arguments.
-
- * **halo_list** (*string*): 'all' to use the full halo list, or 'filtered' 
-   to use the filtered halo list created after calling make_profiles.
-
-See the recipe in the cookbook for an example.
+.. note:: As of :code:`yt-3.0`, the functionality for recording properties 
+   of the nearest halo to each element of the ray no longer exists.  This 
+   is still available in :code:`yt-2.x`.  If you would like to use this 
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact 
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

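For readers trying the new single-dataset parameters described above
(**start_position**, **end_position**, **trajectory**), here is a hedged
sketch.  The constructor form for a single dataset (a dataset filename with
no simulation type or redshift interval) and the sample dataset path are
assumptions, not shown in this diff:

.. code-block:: python

   from yt.analysis_modules.cosmological_observation.api import LightRay

   # Hypothetical single-dataset usage; the dataset path is illustrative.
   lr = LightRay("enzo_tiny_cosmology/RD0009/RD0009")

   # Positions are coordinates within the dataset; use either
   # end_position or trajectory, not both.
   lr.make_light_ray(start_position=[0.0, 0.0, 0.0],
                     end_position=[1.0, 1.0, 1.0],
                     solution_filename="ray_solution.txt",
                     data_filename="ray.h5",
                     fields=["temperature", "density"],
                     get_los_velocity=True)
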
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -29,6 +29,8 @@
 
 .. yt_cookbook:: halo_merger_tree.py
 
+.. _cookbook-light_cone:
+
 Light Cone Projection
 ~~~~~~~~~~~~~~~~~~~~~
 This script creates a light cone projection, a synthetic observation 
@@ -37,27 +39,15 @@
 
 .. yt_cookbook:: light_cone_projection.py
 
-Light Cone with Halo Mask
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script combines the light cone generator with the halo profiler to 
-make a light cone projection with all of the halos cut out of the image.
+.. _cookbook-light_ray:
 
-.. yt_cookbook:: light_cone_with_halo_mask.py 
+Light Ray
+~~~~~~~~~
+This script demonstrates how to make a synthetic quasar sight line that 
+extends over multiple datasets and can be used to generate a synthetic 
+absorption spectrum.
 
-Making Unique Light Cones
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a series of light cone projections
-that only have a maximum amount of volume in common.
-
-.. yt_cookbook:: unique_light_cone_projections.py 
-
-Making Light Rays
-~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a synthetic quasar sight line and 
-uses the halo profiler to record information about halos close to the 
-line of sight.
-
-.. yt_cookbook:: make_light_ray.py 
+.. yt_cookbook:: light_ray.py 
 
 Creating and Fitting Absorption Spectra
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,12 +1,8 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightCone
 
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Create a LightCone object extending from z = 0 to z = 0.1
-# with a 600 arcminute field of view and a resolution of
-# 60 arcseconds.
+# Create a LightCone object extending from z = 0 to z = 0.1.
 
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
@@ -14,20 +10,21 @@
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                'Enzo', 0., 0.1,
                observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
                time_data=False)
 
 # Calculate a randomization of the solution.
-lc.calculate_light_cone_solution(seed=123456789)
+lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt")
 
 # Choose the field to be projected.
-field = 'SZY'
+field = 'szy'
 
+# Use the LightCone object to make a projection with a 600 arcminute 
+# field of view and a resolution of 60 arcseconds.
 # Set njobs to -1 to have one core work on each projection
-# in parallel.  Set save_slice_images to True to see an
-# image for each individual slice.
-lc.project_light_cone(field, save_stack=False,
+# in parallel.
+lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
+                      weight_field=None,
+                      save_stack=True,
                       save_final_image=True,
-                      save_slice_images=False,
+                      save_slice_images=True,
                       njobs=-1)

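Since the recipe above sets ``save_stack=True``, the unflattened stack ends
up in an hdf5 file.  A small sketch for inspecting it; the output file name
and the dataset names inside it are assumptions, so the code lists the keys
rather than hard-coding them:

.. code-block:: python

   import h5py

   # Open the saved light cone stack and list its contents.
   with h5py.File("LC/LightCone.h5", "r") as f:
       for name, dset in f.items():
           print("%s %s" % (name, dset.shape))
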
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ /dev/null
@@ -1,78 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-
-# Instantiate a light cone object as usual.
-lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
-               'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
-               time_data=False,
-               output_dir='LC_HM', output_prefix='LightCone')
-
-# Calculate the light cone solution.
-lc.calculate_light_cone_solution(seed=123456789,
-                                 filename='LC_HM/lightcone.dat')
-
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir': 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Get the halo list for the active solution of this light cone using
-# the HaloProfiler settings set up above.
-# Write the boolean map to an hdf5 file called 'halo_mask.h5'.
-# Write a text file detailing the location, redshift, radius, and mass
-# of each halo in light cone projection.
-lc.get_halo_mask(mask_file='LC_HM/halo_mask.h5',
-                 map_file='LC_HM/halo_map.out',
-                 cube_file='LC_HM/halo_cube.h5',
-                 virial_overdensity=100,
-                 halo_profiler_parameters=halo_profiler_parameters,
-                 njobs=1, dynamic=False)
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make the light cone projection and apply the halo mask.
-pc = lc.project_light_cone(field, save_stack=False,
-                           save_final_image=True,
-                           save_slice_images=False,
-                           apply_halo_mask=True)

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/light_ray.py
--- /dev/null
+++ b/doc/source/cookbook/light_ray.py
@@ -0,0 +1,25 @@
+import os
+import sys
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightRay
+
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
+     
+# Create a LightRay object extending from z = 0 to z = 0.1
+# and use only the redshift dumps.
+lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
+              'Enzo', 0.0, 0.1,
+              use_minimum_datasets=True,
+              time_data=False)
+
+# Make a light ray, and set njobs to -1 to use one core
+# per dataset.
+lr.make_light_ray(seed=123456789,
+                  solution_filename='LR/lightraysolution.txt',
+                  data_filename='LR/lightray.h5',
+                  fields=['temperature', 'density'],
+                  get_los_velocity=True,
+                  njobs=-1)

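Following the note in the light ray documentation that column densities come
from multiplying physical densities by the path length, here is a sketch of
post-processing the output written above.  The dataset names ('density' and
'dl') inside ``lightray.h5`` are assumptions about how the ray fields are
stored:

.. code-block:: python

   import h5py
   import numpy as np

   # Column density along the ray: sum over cells of density * path length.
   with h5py.File("LR/lightray.h5", "r") as f:
       density = np.array(f["density"])   # physical density per cell
       dl = np.array(f["dl"])             # path length through each cell
   print("column density = %e" % (density * dl).sum())
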
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ /dev/null
@@ -1,69 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import os
-import sys
-import yt
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     LightRay
-
-# Create a directory for the light rays
-if not os.path.isdir("LR"): 
-    os.mkdir('LR')
-     
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
-              'Enzo', 0.0, 0.1,
-              use_minimum_datasets=True,
-              time_data=False)
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir' : 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456789,
-                  solution_filename='LR/lightraysolution.txt',
-                  data_filename='LR/lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_nearest_halo=True,
-                  nearest_halo_fields=['TotalMassMsun_100',
-                                       'RadiusMpc_100'],
-                  halo_profiler_parameters=halo_profiler_parameters,
-                  get_los_velocity=True,
-                  njobs=-1)

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda doc/source/cookbook/unique_light_cone_projections.py
--- a/doc/source/cookbook/unique_light_cone_projections.py
+++ /dev/null
@@ -1,34 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Instantiate a light cone.
-lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", 'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=120.0,
-               image_resolution_in_arcseconds=60.0,
-               use_minimum_datasets=True,
-               time_data=False,
-               output_dir='LC_U', output_prefix='LightCone')
-
-# Try to find 10 solutions that have at most 10% volume in
-# common and give up after 50 consecutive failed attempts.
-# The recycle=True setting tells the code to first attempt
-# to use solutions with the same projection axes as other
-# solutions.  This will save time when making the projection.
-yt.find_unique_solutions(lc, max_overlap=0.10, failures=50,
-                         seed=123456789, recycle=True,
-                         solutions=10, filename='LC_U/unique.dat')
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make light cone projections with each of the random seeds
-# found above.  All output files will be written with unique
-# names based on the random seed numbers.
-yt.project_unique_light_cones(lc, 'LC_U/unique.dat', field,
-                              save_stack=False,
-                              save_final_image=True,
-                              save_slice_images=False)

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda yt/analysis_modules/cosmological_observation/api.py
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -17,9 +17,7 @@
     CosmologySplice
 
 from .light_cone.api import \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones
+    LightCone
 
 from .light_ray.api import \
     LightRay

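The practical effect of the api.py change above: only the following imports
remain valid, and the unique-solution helpers now raise an ImportError.

.. code-block:: python

   # Still importable after this change:
   from yt.analysis_modules.cosmological_observation.api import \
       CosmologySplice, LightCone, LightRay

   # No longer available:
   # from yt.analysis_modules.cosmological_observation.api import \
   #     find_unique_solutions, project_unique_light_cones
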
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -78,8 +78,9 @@
 
         Examples
         --------
-        >>> cosmo = es.create_cosmology_splice(1.0, 0.0, minimal=True,
-                                               deltaz_min=0.0)
+
+        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
+        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)
 
         """
 
@@ -133,12 +134,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (np.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.abs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.abs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -153,20 +154,20 @@
 
                     if current_slice is cosmology_splice[-1]:
                         near_redshift = cosmology_splice[-1]['redshift'] - \
-                          cosmology_splice[-1]['deltazMax']
+                          cosmology_splice[-1]['dz_max']
                         mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                         break
                     else:
                         cosmology_splice.append(current_slice)
 
                 z = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
-                                                                    obj['redshift']))
+            self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
+                                                           obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
 
@@ -175,14 +176,14 @@
                 if (nextOutput['redshift'] <= near_redshift):
                     break
                 if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
-                    cosmology_splice[-1]['deltazMin']):
+                    cosmology_splice[-1]['dz_min']):
                     cosmology_splice.append(nextOutput)
                 nextOutput = nextOutput['next']
             if (cosmology_splice[-1]['redshift'] -
-                cosmology_splice[-1]['deltazMax']) > near_redshift:
+                cosmology_splice[-1]['dz_max']) > near_redshift:
                 mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                 near_redshift = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
@@ -253,7 +254,7 @@
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
-            outputs.append({'redshift': z, 'deltazMax': deltaz_max})
+            outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
         mylog.info("%d data dumps will be needed to get from z = %f to %f." %
@@ -282,28 +283,24 @@
             # at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.1 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
-            output['deltazMax'] = np.fabs(z2 - z)
-
+            output['dz_max'] = np.abs(z2 - z)
+            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
@@ -322,28 +319,24 @@
             # top grid pixel at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.01 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
+            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,24 +350,20 @@
         # box at a given redshift.
         z1 = z
         z2 = z1 - 0.1 # just an initial guess
-        distance1 = 0.0
+        distance1 = self.simulation.quan(0.0, "Mpccm / h")
+        distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1
 
-        # Convert comoving radial distance into Mpc / h,
-        # since that's how box size is stored.
-        distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-          self.cosmology.hubble_constant
-
-        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
-            z2 = ((target_distance - distance2) / m) + z2
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.cosmology.hubble_constant
+            z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration += 1
             if (iteration > max_Iterations):
-                mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, np.fabs(z2 - z)))
+                mylog.error("deltaz_forward: Warning - max iterations " +
+                            "exceeded for z = %f (delta z = %f)." %
+                            (z, np.abs(z2 - z)))
                 break
-        return np.fabs(z2 - z)
+        return np.abs(z2 - z)

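A note on the iteration being cleaned up above: although the comments call
it Newton's method, the update is really a secant step, since the slope is
estimated from the two most recent (z, distance) points rather than from an
analytic derivative.  A self-contained sketch of the same scheme, with the
distance function supplied by the caller (the names here are illustrative):

.. code-block:: python

   import numpy as np

   def deltaz_forward(z, target_distance, comoving_radial_distance,
                      tolerance=1.0e-4, max_iterations=100):
       """Find dz such that the comoving radial distance from (z - dz)
       to z equals target_distance, via secant iteration.
       comoving_radial_distance(z_near, z_far) is caller-supplied."""
       z1, z2 = z, z - 0.1    # two initial guesses, as in the code above
       d1 = 0.0
       d2 = comoving_radial_distance(z2, z)
       for _ in range(max_iterations):
           if np.abs(d2 - target_distance) / d2 <= tolerance:
               break
           m = (d2 - d1) / (z2 - z1)        # estimated slope d(dist)/dz
           z1, d1 = z2, d2
           z2 = z2 + (target_distance - d2) / m
           d2 = comoving_radial_distance(z2, z)
       return np.abs(z2 - z)
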
diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda yt/analysis_modules/cosmological_observation/light_cone/api.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/api.py
@@ -1,5 +1,5 @@
 """
-API for lightcone
+API for light_cone
 
 
 
@@ -15,7 +15,3 @@
 
 from .light_cone import \
     LightCone
-
-from .unique_solution import \
-    project_unique_light_cones, \
-    find_unique_solutions

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Function to calculate volume in common between two n-cubes, with optional
-periodic boundary conditions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-def common_volume(n_cube_1, n_cube_2, periodic=None):
-    "Return the n-volume in common between the two n-cubes."
-
-    # Check for proper args.
-    if ((len(np.shape(n_cube_1)) != 2) or
-        (np.shape(n_cube_1)[1] != 2) or
-        (np.shape(n_cube_1) != np.shape(n_cube_2))):
-        print "Arguments must be 2 (n, 2) numpy array."
-        return 0
-
-    if ((periodic is not None) and
-        (np.shape(n_cube_1) != np.shape(periodic))):
-        print "periodic argument must be (n, 2) numpy array."
-        return 0
-
-    nCommon = 1.0
-    for q in range(np.shape(n_cube_1)[0]):
-        if (periodic is None):
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
-        else:
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q],
-                                      periodic=periodic[q])
-
-    return nCommon
-
-def common_segment(seg1, seg2, periodic=None):
-    "Return the length of the common segment."
-
-    # Check for proper args.
-    if ((len(seg1) != 2) or (len(seg2) != 2)):
-        print "Arguments must be arrays of size 2."
-        return 0
-
-    # If not periodic, then this is very easy.
-    if periodic is None:
-        seg1.sort()
-        len1 = seg1[1] - seg1[0]
-        seg2.sort()
-        len2 = seg2[1] - seg2[0]
-
-        common = 0.0
-
-        add = seg1[1] - seg2[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        add = seg2[1] - seg1[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        common = min(common, len1, len2)
-        return common
-
-    # If periodic, it's a little more complicated.
-    else:
-        if len(periodic) != 2:
-            print "periodic array must be of size 2."
-            return 0
-
-        seg1.sort()
-        flen1 = seg1[1] - seg1[0]
-        len1 = flen1 - int(flen1)
-        seg2.sort()
-        flen2 = seg2[1] - seg2[0]
-        len2 = flen2 - int(flen2)
-
-        periodic.sort()
-        scale = periodic[1] - periodic[0]
-
-        if (abs(int(flen1)-int(flen2)) >= scale):
-            return min(flen1, flen2)
-
-        # Adjust for periodicity
-        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
-        seg1[1] = seg1[0] + len1
-        if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
-        seg2[1] = seg2[0] + len2
-        if (seg2[1] > periodic[1]): seg2[1] -= scale
-
-        # create list of non-periodic segments
-        pseg1 = []
-        if (seg1[0] >= seg1[1]):
-            pseg1.append([seg1[0], periodic[1]])
-            pseg1.append([periodic[0], seg1[1]])
-        else:
-            pseg1.append(seg1)
-        pseg2 = []
-        if (seg2[0] >= seg2[1]):
-            pseg2.append([seg2[0], periodic[1]])
-            pseg2.append([periodic[0], seg2[1]])
-        else:
-            pseg2.append(seg2)
-
-        # Add up common segments.
-        common = min(int(flen1), int(flen2))
-
-        for subseg1 in pseg1:
-            for subseg2 in pseg2:
-                common += common_segment(subseg1, subseg2)
-
-        return common

diff -r 8f2015a0da2717edbb82f2dbd4f2073b0d95b63c -r ef7c988b774a159c854d7a2fefc000e767007eda yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Light cone halo mask functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import h5py
-import numpy as np
-
-from yt.funcs import *
-from yt.analysis_modules.halo_profiler.api import \
-     HaloProfiler
-from yt.convenience import load
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_objects, \
-     parallel_root_only
-
-def _light_cone_halo_mask(lightCone, cube_file=None,
-                          mask_file=None, map_file=None,
-                          halo_profiler_parameters=None,
-                          virial_overdensity=200,
-                          njobs=1, dynamic=False):
-    "Make a boolean mask to cut clusters out of light cone projections."
-
-    if halo_profiler_parameters is None:
-        halo_profiler_parameters = {}
-
-    pixels = int(lightCone.field_of_view_in_arcminutes * 60.0 /
-                 lightCone.image_resolution_in_arcseconds)
-
-    # Loop through files in light cone solution and get virial quantities.
-    halo_map_storage = {}
-    for my_storage, my_slice in \
-      parallel_objects(lightCone.light_cone_solution,
-                       njobs=njobs, dynamic=dynamic,
-                       storage=halo_map_storage):
-        halo_list = _get_halo_list(my_slice['filename'],
-                                   **halo_profiler_parameters)
-        my_storage.result = \
-          {'mask': _make_slice_mask(my_slice, halo_list, pixels,
-                                    virial_overdensity)}
-        if map_file is not None:
-            my_storage.result['map'] = \
-              _make_slice_halo_map(my_slice, halo_list,
-                                   virial_overdensity)
-
-    # Reassemble halo mask and map lists.
-    light_cone_mask = []
-    halo_map = []
-    all_slices = halo_map_storage.keys()
-    all_slices.sort()
-    for i in all_slices:
-        light_cone_mask.append(halo_map_storage[i]['mask'])
-        if map_file is not None:
-            halo_map.extend(halo_map_storage[i]['map'])
-    del halo_map_storage
-
-    # Write out cube of masks from each slice.
-    if cube_file is not None:
-        _write_halo_mask(cube_file, np.array(light_cone_mask))
-
-    # Write out a text list of all halos in the image.
-    if map_file is not None:
-        _write_halo_map(map_file, halo_map)
-
-    # Write out final mask.
-    if mask_file is not None:
-        # Final mask is simply the product of the mask from each slice.
-        final_mask = np.ones(shape=(pixels, pixels))
-        for mask in light_cone_mask:
-            final_mask *= mask
-        _write_halo_mask(mask_file, final_mask)
-
-    return light_cone_mask
-
- at parallel_root_only
-def _write_halo_mask(filename, halo_mask):
-    r"""Write out an hdf5 file with the halo mask that
-    can be applied to an image.
-    """
-
-    mylog.info("Saving halo mask to %s." % filename)
-    output = h5py.File(filename, 'a')
-    if 'HaloMask' in output.keys():
-        del output['HaloMask']
-    output.create_dataset('HaloMask', data=np.array(halo_mask))
-    output.close()
-
- at parallel_root_only
-def _write_halo_map(filename, halo_map):
-    "Write a text list of halos in a light cone image."
-
-    mylog.info("Saving halo map to %s." % filename)
-    f = open(filename, 'w')
-    f.write("#z       x         y        r_image   r_mpc     m_Msun\n")
-    for halo in halo_map:
-        f.write("%7.4f %9.6f %9.6f %9.3e %9.3e %9.3e\n" % \
-                    (halo['redshift'], halo['x'], halo['y'],
-                     halo['radius_image'], halo['radius_mpc'],
-                     halo['mass']))
-    f.close()
-
-def _get_halo_list(dataset, halo_profiler_kwargs=None,
-                   halo_profiler_actions=None, halo_list='all'):
-    "Load a list of halos for the dataset."
-
-    if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
-    if halo_profiler_actions is None: halo_profiler_actions = []
-
-    hp = HaloProfiler(dataset, **halo_profiler_kwargs)
-    for action in halo_profiler_actions:
-        if not action.has_key('args'): action['args'] = ()
-        if not action.has_key('kwargs'): action['kwargs'] = {}
-        action['function'](hp, *action['args'], **action['kwargs'])
-
-    if halo_list == 'all':
-        return_list = copy.deepcopy(hp.all_halos)
-    elif halo_list == 'filtered':
-        return_list = copy.deepcopy(hp.filtered_halos)
-    else:
-        mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-        return_list = None
-
-    del hp
-    return return_list
-
-def _make_slice_mask(slice, halo_list, pixels, virial_overdensity):
-    "Make halo mask for one slice in light cone solution."
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Make boolean mask and cut out halos.
-    dx = slice['box_width_fraction'] / pixels
-    x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
-
-    # Cut out any pixel that has any part at all in the circle.
-    for q in range(len(all_halo_radius)):
-        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
-                              np.array(range(pixels))) != 0
-        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
-                              np.array(range(pixels))) != 0
-
-        xDistance = (np.abs(x - all_halo_x[q]) -
-                     (0.5 * dx)) * dif_xIndex
-        yDistance = (np.abs(x - all_halo_y[q]) -
-                     (0.5 * dx)) * dif_yIndex
-
-        distance = np.array([np.sqrt(w**2 + xDistance**2)
-                             for w in yDistance])
-        haloMask *= (distance >= all_halo_radius[q])
-
-    return haloMask
-
-def _make_slice_halo_map(slice, halo_list, virial_overdensity):
-    "Make list of halos for one slice in light cone solution."
-
-    # Get units to convert virial radii back to physical units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Construct list of halos
-    halo_map = []
-
-    for q in range(len(all_halo_x)):
-        # Give radius in both physics units and
-        # units of the image (0 to 1).
-        radius_mpc = all_halo_radius[q] * Mpc_units
-        radius_image = all_halo_radius[q] / slice['box_width_fraction']
-
-        halo_map.append({'x': all_halo_x[q] / slice['box_width_fraction'],
-                         'y': all_halo_y[q] / slice['box_width_fraction'],
-                         'redshift': slice['redshift'],
-                         'radius_mpc': radius_mpc,
-                         'radius_image': radius_image,
-                         'mass': all_halo_mass[q]})
-
-    return halo_map
-
-def _make_slice_halo_list(slice, halo_list, virial_overdensity):
-    "Make shifted, tiled list of halos for halo mask and halo map."
-
-   # Make numpy arrays for halo centers and virial radii.
-    halo_x = []
-    halo_y = []
-    halo_depth = []
-    halo_radius = []
-    halo_mass = []
-
-    # Get units to convert virial radii to code units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    for halo in halo_list:
-        if halo is not None:
-            center = copy.deepcopy(halo['center'])
-            halo_depth.append(center.pop(slice['projection_axis']))
-            halo_x.append(center[0])
-            halo_y.append(center[1])
-            halo_radius.append(halo['RadiusMpc_%d' % virial_overdensity] /
-                               Mpc_units)
-            halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
-
-    halo_x = np.array(halo_x)
-    halo_y = np.array(halo_y)
-    halo_depth = np.array(halo_depth)
-    halo_radius = np.array(halo_radius)
-    halo_mass = np.array(halo_mass)
-
-    # Adjust halo centers along line of sight.
-    depth_center = slice['projection_center'][slice['projection_axis']]
-    depth_left = depth_center - 0.5 * slice['box_depth_fraction']
-    depth_right = depth_center + 0.5 * slice['box_depth_fraction']
-
-    # Make boolean mask to pick out centers in region along line of sight.
-    # Halos near edges may wrap around to other side.
-    add_left = (halo_depth + halo_radius) > 1 # should be box width
-    add_right = (halo_depth - halo_radius) < 0
-
-    halo_depth = np.concatenate([halo_depth,
-                                 (halo_depth[add_left]-1),
-                                 (halo_depth[add_right]+1)])
-    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = np.concatenate([halo_radius,
-                                  halo_radius[add_left],
-                                  halo_radius[add_right]])
-    halo_mass = np.concatenate([halo_mass,
-                                halo_mass[add_left],
-                                halo_mass[add_right]])
-
-    del add_left, add_right
-
-    # Cut out the halos outside the region of interest.
-    if (slice['box_depth_fraction'] < 1):
-        if (depth_left < 0):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right)) | \
-                ((halo_depth + halo_radius >= depth_left + 1) &
-                 (halo_depth - halo_radius <= 1))
-        elif (depth_right > 1):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right - 1)) | \
-                ((halo_depth + halo_radius >= depth_left) &
-                 (halo_depth - halo_radius <= 1))
-        else:
-            mask = (halo_depth + halo_radius >= depth_left) & \
-              (halo_depth - halo_radius <= depth_right)
-
-        halo_x = halo_x[mask]
-        halo_y = halo_y[mask]
-        halo_radius = halo_radius[mask]
-        halo_mass = halo_mass[mask]
-        del mask
-    del halo_depth
-
-    all_halo_x = np.array([])
-    all_halo_y = np.array([])
-    all_halo_radius = np.array([])
-    all_halo_mass = np.array([])
-
-    # Tile halos of width box fraction is greater than one.
-    # Copy original into offset positions to make tiles.
-    for x in range(int(np.ceil(slice['box_width_fraction']))):
-        for y in range(int(np.ceil(slice['box_width_fraction']))):
-            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
-
-    del halo_x, halo_y, halo_radius, halo_mass
-
-    # Shift centers laterally.
-    offset = copy.deepcopy(slice['projection_center'])
-    del offset[slice['projection_axis']]
-
-    # Shift x and y positions.
-    all_halo_x -= offset[0]
-    all_halo_y -= offset[1]
-
-    # Wrap off-edge centers back around to
-    # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
-
-    # After shifting, some centers have fractional coverage
-    # on both sides of the box.
-    # Find those centers and make copies to be placed on the other side.
-
-    # Centers hanging off the right edge.
-    add_x_right = all_halo_x + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
-    add_x_halo_y = all_halo_y[add_x_right]
-    add_x_halo_radius = all_halo_radius[add_x_right]
-    add_x_halo_mass = all_halo_mass[add_x_right]
-    del add_x_right
-
-    # Centers hanging off the left edge.
-    add_x_left = all_halo_x - all_halo_radius < 0
-    add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
-    add2_x_halo_y = all_halo_y[add_x_left]
-    add2_x_halo_radius = all_halo_radius[add_x_left]
-    add2_x_halo_mass = all_halo_mass[add_x_left]
-    del add_x_left
-
-    # Centers hanging off the top edge.
-    add_y_right = all_halo_y + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_y_halo_x = all_halo_x[add_y_right]
-    add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
-    add_y_halo_radius = all_halo_radius[add_y_right]
-    add_y_halo_mass = all_halo_mass[add_y_right]
-    del add_y_right
-
-    # Centers hanging off the bottom edge.
-    add_y_left = all_halo_y - all_halo_radius < 0
-    add2_y_halo_x = all_halo_x[add_y_left]
-    add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
-    add2_y_halo_radius = all_halo_radius[add_y_left]
-    add2_y_halo_mass = all_halo_mass[add_y_left]
-    del add_y_left
-
-    # Add the hanging centers back to the projection data.
-    all_halo_x = np.concatenate([all_halo_x,
-                                 add_x_halo_x, add2_x_halo_x,
-                                 add_y_halo_x, add2_y_halo_x])
-    all_halo_y = np.concatenate([all_halo_y,
-                                 add_x_halo_y, add2_x_halo_y,
-                                 add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = np.concatenate([all_halo_radius,
-                                      add_x_halo_radius,
-                                      add2_x_halo_radius,
-                                      add_y_halo_radius,
-                                      add2_y_halo_radius])
-    all_halo_mass = np.concatenate([all_halo_mass,
-                                    add_x_halo_mass,
-                                    add2_x_halo_mass,
-                                    add_y_halo_mass,
-                                    add2_y_halo_mass])
-
-    del add_x_halo_x, add_x_halo_y, add_x_halo_radius
-    del add2_x_halo_x, add2_x_halo_y, add2_x_halo_radius
-    del add_y_halo_x, add_y_halo_y, add_y_halo_radius
-    del add2_y_halo_x, add2_y_halo_y, add2_y_halo_radius
-
-    # Cut edges to proper width.
-    cut_mask = (all_halo_x - all_halo_radius <
-                slice['box_width_fraction']) & \
-        (all_halo_y - all_halo_radius <
-         slice['box_width_fraction'])
-    all_halo_x = all_halo_x[cut_mask]
-    all_halo_y = all_halo_y[cut_mask]
-    all_halo_radius = all_halo_radius[cut_mask]
-    all_halo_mass = all_halo_mass[cut_mask]
-    del cut_mask
-
-    return (all_halo_x, all_halo_y,
-            all_halo_radius, all_halo_mass)

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list