[Yt-svn] commit/yt: 12 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Mar 15 11:35:57 PDT 2011


12 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/6dc250784d2e/
changeset:   r3821:6dc250784d2e
branch:      yt
user:        brittonsmith
date:        2011-03-04 01:04:27
summary:     Added Dust_Temperature field.
affected #:  1 file (524 bytes)

--- a/yt/frontends/enzo/fields.py	Sun Feb 27 22:33:20 2011 -0500
+++ b/yt/frontends/enzo/fields.py	Thu Mar 03 19:04:27 2011 -0500
@@ -240,7 +240,7 @@
 _default_fields = ["Density","Temperature",
                    "x-velocity","y-velocity","z-velocity",
                    "x-momentum","y-momentum","z-momentum",
-                   "Bx", "By", "Bz"]
+                   "Bx", "By", "Bz", "Dust_Temperature_Density"]
 # else:
 #     _default_fields = ["Density","Temperature","Gas_Energy","Total_Energy",
 #                        "x-velocity","y-velocity","z-velocity"]
@@ -280,6 +280,17 @@
     f._convert_function = _convertVelocity
     f.take_log = False
 
+# Dust temperature - raw field is T_dust * Density
+def _dust_temperature(field, data):
+    return data['Dust_Temperature_Density'] / data['Density']
+def _convert_dust_temperature(data):
+    ef = (1.0 + data.pf.current_redshift)**3.0
+    return data.convert("Density") / ef
+add_field("Dust_Temperature", function=_dust_temperature, 
+          convert_function=_convert_dust_temperature, take_log=True,
+          validators=[ValidateDataField('Dust_Temperature_Density')],
+          units = r"K")
+
 def _spdensity(field, data):
     blank = na.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank

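For context: the raw Dust_Temperature_Density field stores T_dust * Density, so the derived field above divides by Density and applies the Density unit conversion, scaled back by the comoving expansion factor (1+z)^3, to recover a temperature in K. A minimal usage sketch, assuming a hypothetical Enzo dataset written with the dust field enabled:

    from yt.mods import *

    pf = load("DD0010/DD0010")  # hypothetical dataset name
    dd = pf.h.all_data()
    # Only valid where Dust_Temperature_Density was written; the
    # ValidateDataField validator above enforces this.
    print dd["Dust_Temperature"].min(), dd["Dust_Temperature"].max()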

http://bitbucket.org/yt_analysis/yt/changeset/24c3450525a4/
changeset:   r3822:24c3450525a4
branch:      yt
user:        brittonsmith
date:        2011-03-04 03:08:53
summary:     Merged with main repo.
affected #:  0 files (0 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Thu Mar 03 19:04:27 2011 -0500
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Thu Mar 03 21:08:53 2011 -0500
@@ -377,8 +377,8 @@
         # Find the distances to the particles. I don't like this much, but I
         # can't see a way to eliminate a loop like this, either here or in
         # yt.math.
-        for pos in izip(self["particle_position_x"], self["particle_position_y"],
-                self["particle_position_z"]):
+        for pos in itertools.izip(self["particle_position_x"],
+                self["particle_position_y"], self["particle_position_z"]):
             dist[mark] = periodic_dist(cen, pos, period)
             mark += 1
         # Set up the radial bins.
@@ -720,8 +720,8 @@
             # Find the distances to the particles. I don't like this much, but I
             # can't see a way to eliminate a loop like this, either here or in
             # yt.math.
-            for pos in izip(self["particle_position_x"], self["particle_position_y"],
-                    self["particle_position_z"]):
+            for pos in itertools.izip(self["particle_position_x"],
+                    self["particle_position_y"], self["particle_position_z"]):
                 dist[mark] = periodic_dist(cen, pos, period)
                 mark += 1
             dist_min, dist_max = min(dist), max(dist)

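For context, periodic_dist (imported from yt.utilities.math_utils in the following changeset's diff) computes a minimum-image distance: along each axis the separation is the smaller of the direct offset and the wrap-around offset. A standalone sketch of that convention, not the library routine itself:

    import numpy as na

    def periodic_dist_sketch(a, b, period):
        # Minimum-image distance between points a and b in a periodic box
        # whose side lengths are given by period.
        d = na.abs(na.asarray(a) - na.asarray(b))
        d = na.minimum(d, na.asarray(period) - d)
        return na.sqrt((d * d).sum())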

--- a/yt/data_objects/universal_fields.py	Thu Mar 03 19:04:27 2011 -0500
+++ b/yt/data_objects/universal_fields.py	Thu Mar 03 21:08:53 2011 -0500
@@ -289,9 +289,9 @@
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Entropy(field, data):
-    return (kboltz/mh) * data["Temperature"] / \
-           (data["MeanMolecularWeight"] * data["Density"]**(2./3.))
-add_field("Entropy", units=r"\rm{ergs}\/\rm{cm}^{2}",
+    return kboltz  * data["Temperature"] / \
+           (data["NumberDensity"]**(data.pf["Gamma"] - 1.0))
+add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{2}",
           function=_Entropy)
 
 def _Height(field, data):

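The Entropy field now uses number density and the dataset's adiabatic index instead of mass density with a hard-coded 2/3 exponent: S = k_B * T / n**(Gamma - 1), which for Gamma = 5/3 carries the advertised units of ergs cm^2. A self-contained restatement of the new definition (the standalone constant and the Gamma default are for illustration):

    kboltz = 1.3806504e-16  # Boltzmann constant, erg / K

    def entropy(temperature, number_density, gamma=5.0/3.0):
        # S = k_B * T / n**(gamma - 1); ergs cm^2 when gamma = 5/3.
        return kboltz * temperature / number_density**(gamma - 1.0)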

--- a/yt/utilities/logger.py	Thu Mar 03 19:04:27 2011 -0500
+++ b/yt/utilities/logger.py	Thu Mar 03 21:08:53 2011 -0500
@@ -73,11 +73,11 @@
 original_emitter = logging.StreamHandler.emit
 def colorize_logging():
     f = logging.Formatter(cfstring)
-    rootLogger.handlers[0].setFormatter(f)
+    if len(rootLogger.handlers) > 0: rootLogger.handlers[0].setFormatter(f)
     logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
 def uncolorize_logging():
     f = logging.Formatter(ufstring)
-    rootLogger.handlers[0].setFormatter(f)
+    if len(rootLogger.handlers) > 0: rootLogger.handlers[0].setFormatter(f)
     logging.StreamHandler.emit = original_emitter
 
 if ytcfg.getboolean("yt","coloredlogs"):


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Mar 03 19:04:27 2011 -0500
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Mar 03 21:08:53 2011 -0500
@@ -69,7 +69,8 @@
         # we reset it again so that it includes the processor.
         f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
                                             yt.utilities.logger.ufstring))
-        yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
+        if len(yt.utilities.logger.rootLogger.handlers) > 0:
+            yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
         if ytcfg.getboolean("yt", "parallel_traceback"):
             sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
     if ytcfg.getint("yt","LogLevel") < 20:


http://bitbucket.org/yt_analysis/yt/changeset/fbbcc2ac166c/
changeset:   r3823:fbbcc2ac166c
branch:      yt
user:        brittonsmith
date:        2011-03-11 16:26:28
summary:     Merged.
affected #:  0 files (0 bytes)

--- a/yt/analysis_modules/api.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/analysis_modules/api.py	Fri Mar 11 10:26:28 2011 -0500
@@ -35,16 +35,19 @@
     Halo, \
     HOPHalo, \
     parallelHOPHalo, \
+    LoadedHalo, \
     FOFHalo, \
     HaloList, \
     HOPHaloList, \
     FOFHaloList, \
     parallelHOPHaloList, \
+    LoadedHaloList, \
     GenericHaloFinder, \
     parallelHF, \
     HOPHaloFinder, \
     FOFHaloFinder, \
-    HaloFinder
+    HaloFinder, \
+    LoadHaloes
 
 from .halo_mass_function.api import \
     HaloMassFcn, \


--- a/yt/analysis_modules/halo_finding/api.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/analysis_modules/halo_finding/api.py	Fri Mar 11 10:26:28 2011 -0500
@@ -32,13 +32,16 @@
     Halo, \
     HOPHalo, \
     parallelHOPHalo, \
+    LoadedHalo, \
     FOFHalo, \
     HaloList, \
     HOPHaloList, \
     FOFHaloList, \
     parallelHOPHaloList, \
+    LoadedHaloList, \
     GenericHaloFinder, \
     parallelHF, \
     HOPHaloFinder, \
     FOFHaloFinder, \
-    HaloFinder
+    HaloFinder, \
+    LoadHaloes


--- a/yt/analysis_modules/halo_finding/halo_objects.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Fri Mar 11 10:26:28 2011 -0500
@@ -32,12 +32,14 @@
 import numpy as na
 import random
 import sys
+from collections import defaultdict
 
 from yt.funcs import *
 
 from yt.config import ytcfg
 from yt.utilities.performance_counters import \
     yt_counters, time_function
+from yt.utilities.math_utils import periodic_dist
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -73,6 +75,7 @@
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
+        self.pf = self.data.pf
         if indices is not None:
             self.indices = halo_list._base_indices[indices]
         else:
@@ -360,19 +363,20 @@
             return None
         self.bin_count = bins
         # Cosmology
-        h = self.data.pf.hubble_constant
-        Om_matter = self.data.pf.omega_matter
-        z = self.data.pf.current_redshift
+        h = self.pf.hubble_constant
+        Om_matter = self.pf.omega_matter
+        z = self.pf.current_redshift
+        period = self.pf.domain_right_edge - \
+            self.pf.domain_left_edge
+        cm = self.pf["cm"]
+        thissize = max(self.size, self.indices.size)
         rho_crit_now = 1.8788e-29 * h**2.0 * Om_matter # g cm^-3
         Msun2g = 1.989e33
         rho_crit = rho_crit_now * ((1.0 + z)**3.0)
-        
         # Get some pertinent information about the halo.
         self.mass_bins = na.zeros(self.bin_count+1, dtype='float64')
-        dist = na.empty(self.indices.size, dtype='float64')
+        dist = na.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
-        period = self.data.pf.domain_right_edge - \
-            self.data.pf.domain_left_edge
         mark = 0
         # Find the distances to the particles. I don't like this much, but I
         # can't see a way to eliminate a loop like this, either here or in
@@ -397,7 +401,7 @@
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
-        (self.radial_bins * self.data.pf["cm"])**3.0)
+        (self.radial_bins * cm)**3.0)
         
 
 class HOPHalo(Halo):
@@ -785,6 +789,186 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
+class LoadedHalo(Halo):
+    def __init__(self, pf, id, size=None, CoM=None,
+        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        rms_vel=None, fnames=None):
+        self.pf = pf
+        self.id = id
+        self.size = size
+        self.CoM = CoM
+        self.max_dens_point = max_dens_point
+        self.group_total_mass = group_total_mass
+        self.max_radius = max_radius
+        self.bulk_vel = bulk_vel
+        self.rms_vel = rms_vel
+        # fnames = the names of the h5 files that have particle data for this halo
+        self.fnames = fnames
+        self.bin_count = None
+        self.overdensity = None
+        self.saved_fields = {}
+        self.particle_mask = None
+        self.ds_sort = None
+        self.indices = na.array([]) # Never used for a LoadedHalo.
+
+    def __getitem__(self, key):
+        # This function will try to get particle data in one of three ways,
+        # in descending preference.
+        # 1. From saved_fields, e.g. we've already got it.
+        # 2. From the halo h5 files off disk.
+        # 3. Use the unique particle indexes of the halo to select a missing
+        # field from an AMR Sphere.
+        try:
+            # We've already got it.
+            return self.saved_fields[key]
+        except KeyError:
+            # Gotta go get it from the halo h5 files.
+            field_data = self._get_particle_data(self.id, self.fnames,
+                self.size, key)
+            #if key == 'particle_position_x': field_data = None
+            if field_data is not None:
+                self.saved_fields[key] = field_data
+                return self.saved_fields[key]
+            else:
+                # Dynamically create the masking array for particles, and get
+                # the data using standard yt methods. The 1.05 is there to
+                # account for possible silliness having to do with whether
+                # the maximum density or center of mass was used to calculate
+                # the maximum radius.
+                ds = self.pf.h.sphere(self.CoM, 1.05 * self.max_radius)
+                if self.particle_mask is None:
+                    pid = self.__getitem__('particle_index')
+                    sp_pid = ds['particle_index']
+                    self.ds_sort = sp_pid.argsort()
+                    sp_pid = sp_pid[self.ds_sort]
+                    # searchsorted returns, for each index in pid, its
+                    # position in the sorted sp_pid. This works because each
+                    # element of pid appears in sp_pid exactly once.
+                    self.particle_mask = na.searchsorted(sp_pid, pid)
+                # We won't store this field below in saved_fields because
+                # that would mean keeping two copies of it, one in the yt
+                # machinery and one here.
+                return ds[key][self.ds_sort][self.particle_mask]
+
+    def _get_particle_data(self, halo, fnames, size, field):
+        # Given a list of file names, a halo, its size, and the desired field,
+        # this returns the particle data for that halo.
+        # First get the list of fields from the first file. Not all fields
+        # are saved all the time (e.g. creation_time, particle_type).
+        mylog.info("Getting field %s from hdf5 halo particle files." % field)
+        f = h5py.File(fnames[0])
+        fields = f["Halo%08d" % halo].keys()
+        # If we don't have this field, we can give up right now.
+        if field not in fields: return None
+        if field == 'particle_index' or field == 'particle_type':
+            # the only integer field
+            field_data = na.empty(size, dtype='int64')
+        else:
+            field_data = na.empty(size, dtype='float64')
+        f.close()
+        offset = 0
+        for fname in fnames:
+            f = h5py.File(fname)
+            this = f["Halo%08d" % halo][field][:]
+            s = this.size
+            field_data[offset:offset+s] = this
+            offset += s
+            f.close()
+        return field_data
+        
+    def center_of_mass(self):
+        r"""Calculate and return the center of mass.
+
+        The center of mass of the halo is directly calculated and returned.
+        
+        Examples
+        --------
+        >>> com = halos[0].center_of_mass()
+        """
+        return self.CoM
+    
+    def maximum_density_location(self):
+        r"""Return the location HOP identified as maximally dense.
+        
+        Return the location HOP identified as maximally dense.
+
+        Examples
+        --------
+        >>> max_dens_loc = halos[0].maximum_density_location()
+        """
+        return self.max_dens_point[1:]
+
+    def maximum_density(self):
+        r"""Return the HOP-identified maximum density.
+
+        Return the HOP-identified maximum density.
+
+        Examples
+        --------
+        >>> max_dens = halos[0].maximum_density()
+        """
+        return self.max_dens_point[0]
+
+    def total_mass(self):
+        r"""Returns the total mass in solar masses of the halo.
+        
+        Returns the total mass in solar masses of just the particles in the
+        halo.
+
+        Examples
+        --------
+        >>> halos[0].total_mass()
+        """
+        return self.group_total_mass
+
+    def bulk_velocity(self):
+        r"""Returns the mass-weighted average velocity in cm/s.
+
+        This calculates and returns the mass-weighted average velocity of just
+        the particles in the halo in cm/s.
+        
+        Examples
+        --------
+        >>> bv = halos[0].bulk_velocity()
+        """
+        return self.bulk_vel
+
+    def rms_velocity(self):
+        r"""Returns the mass-weighted RMS velocity for the halo
+        particles in cgs units.
+
+        Calculate and return the mass-weighted RMS velocity for just the
+        particles in the halo.  The bulk velocity of the halo is subtracted
+        before computation.
+        
+        Examples
+        --------
+        >>> rms_vel = halos[0].rms_velocity()
+        """
+        return self.rms_vel
+
+    def maximum_radius(self):
+        r"""Returns the maximum radius in the halo for all particles,
+        either from the point of maximum density or from the
+        center of mass.
+
+        The maximum radius from the most dense point is calculated.  This
+        accounts for periodicity.
+
+        Examples
+        --------
+        >>> radius = halos[0].maximum_radius()
+        """
+        return self.max_radius
+
 class HaloList(object):
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
@@ -1106,6 +1290,52 @@
         """
         HaloList.write_out(self, filename)
 
+class LoadedHaloList(HaloList):
+    _name = "Loaded"
+    
+    def __init__(self, pf, basename):
+        self.pf = pf
+        self._groups = []
+        self.basename = basename
+        self._retrieve_halos()
+    
+    def _retrieve_halos(self):
+        # First get the halo particulars.
+        lines = file("%s.out" % self.basename)
+        # The location of particle data for each halo.
+        locations = self._collect_halo_data_locations()
+        halo = 0
+        for line in lines:
+            # Skip the comment lines at top.
+            if line[0] == "#": continue
+            line = line.split()
+            # get the particle data
+            size = int(line[2])
+            fnames = locations[halo]
+            # Everything else
+            CoM = na.array([float(line[7]), float(line[8]), float(line[9])])
+            max_dens_point = na.array([float(line[3]), float(line[4]),
+                float(line[5]), float(line[6])])
+            group_total_mass = float(line[1])
+            max_radius = float(line[13])
+            bulk_vel = na.array([float(line[10]), float(line[11]),
+                float(line[12])])
+            rms_vel = float(line[14])
+            self._groups.append(LoadedHalo(self.pf, halo, size, CoM,
+                max_dens_point, group_total_mass, max_radius, bulk_vel,
+                rms_vel, fnames))
+            halo += 1
+    
+    def _collect_halo_data_locations(self):
+        # The halos are listed in order in the file.
+        lines = file("%s.txt" % self.basename)
+        locations = []
+        for line in lines:
+            line = line.split()
+            locations.append(line[1:])
+        lines.close()
+        return locations
+
 class parallelHOPHaloList(HaloList,ParallelAnalysisInterface):
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
@@ -1481,6 +1711,32 @@
             if not self._is_mine(halo): continue
             halo.write_particle_list(f)
 
+    def dump(self, basename="HopAnalysis"):
+        r"""Save the full halo data to disk.
+        
+        This function will save the halo data in such a manner that it can be
+        easily re-loaded later using `LoadHaloes`. This is similar in concept
+        to pickling the data, but outputs it in the already-established data
+        formats. The simple halo data is written to a text file
+        (e.g. "HopAnalysis.out") using write_out(), and the particle data to
+        hdf5 files (e.g. "HopAnalysis.h5") using write_particle_lists().
+        
+        Parameters
+        ----------
+        basename : String
+            The base name for the files the data will be written to. Default = 
+            "HopAnalysis".
+        
+        Examples
+        --------
+        >>> halos.dump("MyHalos")
+        """
+        self.write_out("%s.out" % basename)
+        self.write_particle_lists(basename)
+        self.write_particle_lists_txt(basename)
+
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
     def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
@@ -1927,3 +2183,32 @@
         self._join_halolists()
 
 HaloFinder = HOPHaloFinder
+
+class LoadHaloes(GenericHaloFinder, LoadedHaloList):
+    def __init__(self, pf, basename):
+        r"""Load the full halo data into memory.
+        
+        This function takes the output of `GenericHaloFinder.dump` and
+        re-establishes the list of halos in memory. This enables the full set
+        of halo analysis features without running the halo finder again. To
+        be precise, the particle data for each halo is only read in when
+        necessary, so examining a single halo will not require as much memory
+        as is required for halo finding.
+        
+        Parameters
+        ----------
+        basename : String
+            The base name of the files that will be read in. This should match
+            what was used when `GenericHaloFinder.dump` was called. Default =
+            "HopAnalysis".
+        
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadHaloes(pf, "HopAnalysis")
+        """
+        self.basename = basename
+        LoadedHaloList.__init__(self, pf, self.basename)
+
+
+        
\ No newline at end of file

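Taken together, dump() and LoadHaloes form a save/restore cycle: the .out file carries the scalar halo quantities parsed in _retrieve_halos, the .h5 files carry particle data that LoadedHalo.__getitem__ reads lazily on first access, and the .txt file maps each halo to its files. A round-trip sketch following the docstrings above (the dataset name is illustrative):

    from yt.mods import *
    from yt.analysis_modules.halo_finding.api import HaloFinder, LoadHaloes

    pf = load("data0005")
    halos = HaloFinder(pf)
    halos.dump("HopAnalysis")  # per the docstrings: .out, .h5, and .txt files

    # Later, possibly in another script: no halo finding is re-run, and
    # particle fields are only read from disk when first accessed.
    halos = LoadHaloes(pf, "HopAnalysis")
    print halos[0].total_mass(), halos[0].maximum_radius()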

--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Fri Mar 11 10:26:28 2011 -0500
@@ -34,8 +34,7 @@
     load
 from yt.data_objects.profiles import \
     BinnedProfile1D, EmptyProfileData
-from yt.analysis_modules.halo_finding.api import \
-    HaloFinder
+from yt.analysis_modules.halo_finding.api import *
 from .halo_filters import \
     VirialFilter
 from yt.data_objects.field_info_container import \
@@ -54,15 +53,23 @@
 
 class HaloProfiler(ParallelAnalysisInterface):
     "Radial profiling, filtering, and projections for halos in cosmological simulations."
-    def __init__(self, dataset, halos='multiple', halo_list_file='HopAnalysis.out', halo_list_format='yt_hop',
-                 halo_finder_function=HaloFinder, halo_finder_args=None, halo_finder_kwargs=None,
-                 use_density_center=False, density_center_exponent=1.0, use_field_max_center=None,
+    def __init__(self, dataset, output_dir=None,
+                 halos='multiple', halo_list_file='HopAnalysis.out', 
+                 halo_list_format='yt_hop', halo_finder_function=parallelHF, 
+                 halo_finder_args=None, 
+                 halo_finder_kwargs=dict(threshold=160.0, safety=1.5, 
+                                         dm_only=False, resize=True, 
+                                         fancy_padding=True, rearrange=True),
+                 use_density_center=False, density_center_exponent=1.0,
+                 use_field_max_center=None,
                  halo_radius=0.1, radius_units='1', n_profile_bins=50,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id','center']):
         """
         Initialize a HaloProfiler object.
+        :param output_dir (str): if specified, all output will be put into this path instead of 
+               in the dataset directories.  Default: None.
         :param halos (str): "multiple" for profiling more than one halo.  In this mode halos are read in 
                from a list or identified with a halo finder.  In "single" mode, the one and only halo 
                center is identified automatically as the location of the peak in the density field.  
@@ -114,7 +121,7 @@
         """
 
         self.dataset = dataset
-
+        self.output_dir = output_dir
         self.profile_output_dir = profile_output_dir
         self.projection_output_dir = projection_output_dir
         self.n_profile_bins = n_profile_bins
@@ -132,6 +139,10 @@
         self.filtered_halos = []
         self._projection_halo_list = []
 
+        # Create output directory if specified
+        if self.output_dir is not None:
+            self.__check_directory(self.output_dir)
+
         # Set halo finder function and parameters, if needed.
         self.halo_finder_function = halo_finder_function
         self.halo_finder_args = halo_finder_args
@@ -153,7 +164,8 @@
         # dictionary: a dictionary containing fields and their corresponding columns.
         self.halo_list_file = halo_list_file
         if halo_list_format == 'yt_hop':
-            self.halo_list_format = {'id':0, 'mass':1, 'center':[7, 8, 9], 'velocity':[10, 11, 12], 'r_max':13}
+            self.halo_list_format = {'id':0, 'mass':1, 'np': 2, 
+                                     'center':[7, 8, 9], 'velocity':[10, 11, 12], 'r_max':13}
         elif halo_list_format == 'enzo_hop':
             self.halo_list_format = {'id':0, 'center':[4, 5, 6]}
         elif halo_list_format == 'p-groupfinder':
@@ -169,7 +181,8 @@
         self.density_center_exponent = density_center_exponent
         if self.use_density_center:
             def _MatterDensityXTotalMass(field, data):
-                return na.power((data['Matter_Density'] * data['TotalMassMsun']), self.density_center_exponent)
+                return na.power((data['Matter_Density'] * data['TotalMassMsun']), 
+                                self.density_center_exponent)
             def _Convert_MatterDensityXTotalMass(data):
                 return 1
             add_field("MatterDensityXTotalMass", units=r"",
@@ -288,8 +301,14 @@
         # Add profile fields necessary for calculating virial quantities.
         if virial_filter: self._check_for_needed_profile_fields()
 
-        outputDir = "%s/%s" % (self.pf.fullpath, self.profile_output_dir)
-        self.__check_directory(outputDir)
+        # Create output directory.
+        if self.output_dir is not None:
+            self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+            my_output_dir = "%s/%s/%s" % (self.output_dir, self.pf.directory, 
+                                          self.profile_output_dir)
+        else:
+            my_output_dir = "%s/%s" % (self.pf.fullpath, self.profile_output_dir)
+        self.__check_directory(my_output_dir)
 
         # Profile all halos.
         for halo in self._get_objs('all_halos', round_robin=True):
@@ -305,7 +324,7 @@
 
             if filter_result and len(self.profile_fields) > 0:
 
-                profile_filename = "%s/Halo_%04d_profile.dat" % (outputDir, halo['id'])
+                profile_filename = "%s/Halo_%04d_profile.dat" % (my_output_dir, halo['id'])
 
                 profiledHalo = self._get_halo_profile(halo, profile_filename, virial_filter=virial_filter)
 
@@ -456,8 +475,14 @@
                 (self.pf.parameters['RefineBy']**proj_level)
             projectionResolution = int(self.projection_width / proj_dx)
 
-        outputDir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
-        self.__check_directory(outputDir)
+        # Create output directory.
+        if self.output_dir is not None:
+            self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+            my_output_dir = "%s/%s/%s" % (self.output_dir, self.pf.directory, 
+                                          self.projection_output_dir)
+        else:
+            my_output_dir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
+        self.__check_directory(my_output_dir)
 
         center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
@@ -519,7 +544,7 @@
                 if save_cube:
                     axis_labels = ['x', 'y', 'z']
                     dataFilename = "%s/Halo_%04d_%s_data.h5" % \
-                            (outputDir, halo['id'], axis_labels[w])
+                            (my_output_dir, halo['id'], axis_labels[w])
                     mylog.info("Saving projection data to %s." % dataFilename)
 
                     output = h5py.File(dataFilename, "a")
@@ -535,7 +560,7 @@
                     output.close()
 
                 if save_images:
-                    pc.save("%s/Halo_%04d" % (outputDir, halo['id']), force_save=True)
+                    pc.save("%s/Halo_%04d" % (my_output_dir, halo['id']), force_save=True)
 
             del region
 
@@ -573,13 +598,17 @@
         if filename is None:
             filename = self.halo_list_file
 
-        hopFile = "%s/%s" % (self.pf.fullpath, filename)
+        if self.output_dir is not None:
+            self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+            hop_file = "%s/%s/%s" % (self.output_dir, self.pf.directory, filename)
+        else:
+            hop_file = "%s/%s" % (self.pf.fullpath, filename)
 
-        if not(os.path.exists(hopFile)):
-            mylog.info("Hop file not found, running hop to get halos.")
-            self._run_hop(hopFile)
+        if not(os.path.exists(hop_file)):
+            mylog.info("Halo finder file not found, running halo finder to get halos.")
+            self._run_hop(hop_file)
 
-        self.all_halos = self._read_halo_list(hopFile)
+        self.all_halos = self._read_halo_list(hop_file)
 
     def _read_halo_list(self, listFile):
         """
@@ -685,11 +714,11 @@
             return None
 
     @parallel_blocking_call
-    def _run_hop(self, hopFile):
+    def _run_hop(self, hop_file):
         "Run hop to get halos."
 
         hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, **self.halo_finder_kwargs)
-        hop_results.write_out(hopFile)
+        hop_results.write_out(hop_file)
 
         del hop_results
         self.pf.h.clear_all_data()
@@ -752,13 +781,13 @@
         fid.close()
 
     @parallel_root_only
-    def __check_directory(self, outputDir):
-        if (os.path.exists(outputDir)):
-            if not(os.path.isdir(outputDir)):
-                mylog.error("Output directory exists, but is not a directory: %s." % outputDir)
-                raise IOError(outputDir)
+    def __check_directory(self, my_output_dir):
+        if (os.path.exists(my_output_dir)):
+            if not(os.path.isdir(my_output_dir)):
+                mylog.error("Output directory exists, but is not a directory: %s." % my_output_dir)
+                raise IOError(my_output_dir)
         else:
-            os.mkdir(outputDir)
+            os.mkdir(my_output_dir)
 
 def shift_projections(pf, pc, oldCenter, newCenter, axis):
     """

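The main user-facing change here is the new output_dir keyword: when set, the halo list, radial profiles, and projections are written under output_dir/<dataset directory>/ instead of alongside the dataset. A usage sketch (the import path and dataset name are assumptions):

    from yt.analysis_modules.halo_profiler.api import HaloProfiler

    hp = HaloProfiler("data0005", output_dir="halo_analysis")
    # Halo list   -> halo_analysis/<dataset dir>/HopAnalysis.out
    # Profiles    -> halo_analysis/<dataset dir>/radial_profiles/
    # Projections -> halo_analysis/<dataset dir>/projections/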

--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Fri Mar 11 10:26:28 2011 -0500
@@ -29,7 +29,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.extensions.kdtree import *
+    from yt.utilities.kdtree import *
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
@@ -42,7 +42,7 @@
     def __init__(self, pf, fields, left_edge=None, right_edge=None,
             total_values=1000000, comm_size=10000, length_type="lin",
             length_number=10, length_range=None, vol_ratio = 1,
-            salt=0):
+            salt=0, theta=None, phi=None):
         r""" Initialize a two point functions object.
         
         Parameters
@@ -81,6 +81,15 @@
             keeping everything else constant from this set: (MPI task count, 
             number of ruler lengths, ruler min/max, number of functions,
             number of point pairs per ruler length). Default = 0.
+        theta : Float
+            For random pairs of points, the second point is found by traversing
+            a distance along a ray set by the angle (phi, theta) from the first
+            point. To keep this angle constant, set ``theta`` to a value in the
+            range [0, pi]. Default = None, which will randomize theta for
+            every pair of points.
+        phi : Float
+            Similar to theta above, but the range of values is [0, 2*pi).
+            Default = None, which will randomize phi for every pair of points.
         
         Examples
         --------
@@ -95,6 +104,8 @@
             raise ImportError("You need to install the Forthon kD-Tree")
         self._fsets = []
         self.fields = fields
+        self.constant_theta = theta
+        self.constant_phi = phi
         # MPI stuff.
         self.size = self._mpi_get_size()
         self.mine = self._mpi_get_rank()
@@ -462,13 +473,20 @@
             r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim],
                 high=self.ds.right_edge[dim], size=size)
         # Next we find the second point, determined by a random
-        # theta, phi angle.
-        theta = self.mt.uniform(low=0, high=2.*math.pi, size=size)
-        phi = self.mt.uniform(low=-math.pi/2., high=math.pi/2., size=size)
+        # theta, phi angle. See Eqns. 1 & 2 from 
+        # http://mathworld.wolfram.com/SpherePointPicking.html,
+        # but phi and theta are switched to the Physics convention.
+        if self.constant_phi is None:
+            phi = self.mt.uniform(low=0, high=2.*math.pi, size=size)
+        else: phi = self.constant_phi * na.ones(size, dtype='float64')
+        if self.constant_theta is None:
+            v = self.mt.uniform(low=0., high=1, size=size)
+            theta = na.arccos(2 * v - 1)
+        else: theta = self.constant_theta * na.ones(size, dtype='float64')
         r2 = na.empty((size,3), dtype='float64')
-        r2[:,0] = r1[:,0] + length * na.cos(theta) * na.cos(phi)
-        r2[:,1] = r1[:,1] + length * na.sin(theta) * na.cos(phi)
-        r2[:,2] = r1[:,2] + length * na.sin(phi)
+        r2[:,0] = r1[:,0] + length * na.cos(phi) * na.sin(theta)
+        r2[:,1] = r1[:,1] + length * na.sin(phi) * na.sin(theta)
+        r2[:,2] = r1[:,2] + length * na.cos(theta)
         # Reflect so it's inside the (full) volume.
         r2 %= self.period
         return (r1, r2)

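The corrected pair generation draws directions uniformly over the sphere: with phi uniform on [0, 2*pi) and theta = arccos(2v - 1) for v uniform on [0, 1), points no longer cluster toward the poles (Eqns. 1 and 2 of the MathWorld page, in the physics convention). A standalone sketch:

    import numpy as na

    n = 1000
    phi = na.random.uniform(0.0, 2.0 * na.pi, n)        # azimuthal angle
    theta = na.arccos(na.random.uniform(-1.0, 1.0, n))  # polar angle: arccos(2v - 1)
    # Unit vectors uniformly distributed over the sphere:
    x = na.sin(theta) * na.cos(phi)
    y = na.sin(theta) * na.sin(phi)
    z = na.cos(theta)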

--- a/yt/data_objects/data_containers.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/data_objects/data_containers.py	Fri Mar 11 10:26:28 2011 -0500
@@ -136,13 +136,19 @@
         self.real_grid = grid
         self.child_mask = 1
         self.ActiveDimensions = self.data['x'].shape
+        self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
+        
     def __getitem__(self, field):
         if field not in self.data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
-                tr = na.sqrt( (self['x'] - center[0])**2.0 +
-                              (self['y'] - center[1])**2.0 +
-                              (self['z'] - center[2])**2.0 )
+                tempx = na.abs(self['x'] - center[0])
+                tempx = na.minimum(tempx, self.DW[0] - tempx)
+                tempy = na.abs(self['y'] - center[1])
+                tempy = na.minimum(tempy, self.DW[1] - tempy)
+                tempz = na.abs(self['z'] - center[2])
+                tempz = na.minimum(tempz, self.DW[2] - tempz)
+                tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
         else: tr = self.data[field]


--- a/yt/data_objects/particle_io.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/data_objects/particle_io.py	Fri Mar 11 10:26:28 2011 -0500
@@ -129,7 +129,10 @@
         ParticleIOHandler.__init__(self, pf, source)
 
     def _get_args(self):
-        return (1, (na.array(self.center, dtype='float64'), self.radius))
+        DLE = na.array(self.pf.domain_left_edge, dtype='float64')
+        DRE = na.array(self.pf.domain_right_edge, dtype='float64')
+        return (1, (na.array(self.center, dtype='float64'), self.radius,
+            1, DLE, DRE))
 
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
     _source_type = "disk"


--- a/yt/frontends/chombo/data_structures.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/frontends/chombo/data_structures.py	Fri Mar 11 10:26:28 2011 -0500
@@ -24,13 +24,34 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
+import re
+import os
+import weakref
+import numpy as na
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     pluto2enzoDict, \
+     yt2plutoFieldsDict, \
+     parameterDict
+
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
-           AMRGridPatch
+     AMRGridPatch
 from yt.data_objects.hierarchy import \
-           AMRHierarchy
+     AMRHierarchy
 from yt.data_objects.static_output import \
-           StaticOutput
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion
 
 from .fields import ChomboFieldContainer
 
@@ -66,12 +87,15 @@
     grid = ChomboGrid
     
     def __init__(self,pf,data_style='chombo_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
+        self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
         self.data_style = data_style
         self.field_info = ChomboFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.hierarchy = os.path.abspath(self.hierarchy_filename)
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = h5py.File(self.hierarchy_filename)
 
@@ -117,8 +141,8 @@
                                start = si, stop = ei)
                 self.grids.append(pg)
                 self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type) + 1)
+                self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
@@ -152,18 +176,13 @@
     _fieldinfo_class = ChomboFieldContainer
     
     def __init__(self, filename, data_style='chombo_hdf5',
-                 storage_filename = None):
+                 storage_filename = None, ini_filename = None):
+        # hardcoded for now 
+        self.current_time = 0.0
+        self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-
         self.field_info = self._fieldinfo_class()
-        # hardcoded for now
-        self.current_time = 0.0
-        # These should be explicitly obtained from the file, but for now that
-        # will wait until a reorganization of the source tree and better
-        # generalization.
-        self.dimensionality = 3
-        self.refine_by = 2
         
     def _set_units(self):
         """
@@ -177,11 +196,11 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
-        for key in yt2orionFieldsDict:
+        for key in yt2plutoFieldsDict:
             self.conversion_factors[key] = 1.0
 
     def _setup_nounits_units(self):
@@ -194,13 +213,81 @@
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
 
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
     def _parse_parameter_file(self):
+        """
+        Check to see whether a 'pluto.ini' or 'orion2.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise, assume the left edge starts at 0 and get the right edge
+        from the hdf5 file.
+        """
+        if os.path.isfile('pluto.ini'):
+            self._parse_pluto_file('pluto.ini')
+        elif os.path.isfile('orion2.ini'):
+            self._parse_pluto_file('orion2.ini')
+        else:
+            self.unique_identifier = \
+                                   int(os.stat(self.parameter_filename)[ST_CTIME])
+            self.domain_left_edge = na.array([0.,0.,0.])
+            self.domain_right_edge = self.__calc_right_edge()
+            self.dimensionality = 3
+            self.refine_by = 2
+
+    def _parse_pluto_file(self, ini_filename):
+        """
+        Reads in an inputs file in the 'pluto.ini' format. Probably not
+        especially robust at the moment.
+        """
+        self.fullplotdir = os.path.abspath(self.parameter_filename)
+        self.ini_filename = self._localize( \
+            self.ini_filename, ini_filename)
         self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.domain_left_edge = na.array([0.,0.,0.])
-        self.domain_right_edge = self.__calc_right_edge()
-        
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        lines = open(self.ini_filename).readlines()
+        # read the file line by line, storing important parameters
+        for lineI, line in enumerate(lines):
+            try: 
+                param, sep, vals = map(rstrip,line.partition(' '))
+            except ValueError:
+                mylog.error("ValueError: '%s'", line)
+            if pluto2enzoDict.has_key(param):
+                paramName = pluto2enzoDict[param]
+                t = map(parameterDict[paramName], vals.split())
+                if len(t) == 1:
+                    self.parameters[paramName] = t[0]
+                else:
+                    if paramName == "RefineBy":
+                        self.parameters[paramName] = t[0]
+                    else:
+                        self.parameters[paramName] = t
 
+            # assumes 3D for now
+            elif param.startswith("X1-grid"):
+                t = vals.split()
+                low1 = float(t[1])
+                high1 = float(t[4])
+                N1 = int(t[2])
+            elif param.startswith("X2-grid"):
+                t = vals.split()
+                low2 = float(t[1])
+                high2 = float(t[4])
+                N2 = int(t[2])
+            elif param.startswith("X3-grid"):
+                t = vals.split()
+                low3 = float(t[1])
+                high3 = float(t[4])
+                N3 = int(t[2])
+
+        self.dimensionality = 3
+        self.domain_left_edge = na.array([low1,low2,low3])
+        self.domain_right_edge = na.array([high1,high2,high3])
+        self.domain_dimensions = na.array([N1,N2,N3])
+        self.refine_by = self.parameters["RefineBy"]
+            
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']

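For reference, _parse_pluto_file indexes the whitespace-split value string of each grid line, taking fields 1, 4, and 2 as an axis's left edge, right edge, and point count. A simplified sketch against a hypothetical pluto.ini grid line (the exact token layout is inferred from the parser, not from PLUTO documentation):

    line = "X1-grid    1    0.0    128    u    1.0"  # hypothetical ini line
    param, sep, vals = line.partition(' ')
    t = vals.split()
    low1, N1, high1 = float(t[1]), int(t[2]), float(t[4])
    # -> low1 = 0.0, N1 = 128, high1 = 1.0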

--- a/yt/frontends/chombo/definitions.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/frontends/chombo/definitions.py	Fri Mar 11 10:26:28 2011 -0500
@@ -23,3 +23,43 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+pluto2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2plutoFieldsDict = {}
+pluto2ytFieldsDict = {}
+


--- a/yt/frontends/chombo/io.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/frontends/chombo/io.py	Fri Mar 11 10:26:28 2011 -0500
@@ -23,6 +23,9 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+import h5py
+import re
+
 from yt.utilities.io_handler import \
            BaseIOHandler
 


--- a/yt/frontends/enzo/data_structures.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/frontends/enzo/data_structures.py	Fri Mar 11 10:26:28 2011 -0500
@@ -226,7 +226,9 @@
         si, ei, LE, RE, fn, np = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         f.readline() # Blank at top
+        pbar = get_pbar("Parsing Hierarchy", self.num_grids)
         for grid_id in xrange(self.num_grids):
+            pbar.update(grid_id)
             # We will unroll this list
             si.append(_next_token_line("GridStartIndex", f))
             ei.append(_next_token_line("GridEndIndex", f))
@@ -246,6 +248,7 @@
                     continue
                 params = line.split()
                 line = f.readline()
+        pbar.finish()
         self._fill_arrays(ei, si, LE, RE, np)
         self.grids = na.array(self.grids, dtype='object')
         self.filenames = fn


--- a/yt/utilities/hdf5_light_reader.c	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/utilities/hdf5_light_reader.c	Fri Mar 11 10:26:28 2011 -0500
@@ -38,6 +38,7 @@
 #define npy_float128 npy_longdouble
 #endif
 
+#define MIN(a,b) ((a) <= (b) ? (a) : (b))
 
 static PyObject *_hdf5ReadError;
 herr_t iterate_dataset(hid_t loc_id, const char *name, void *nodelist);
@@ -78,6 +79,8 @@
     /* These cannot contain any pointers */
     npy_float64 center[3];
     npy_float64 radius;
+    npy_float64 period[3];
+    int periodic;
 } sphere_validation;
 
 typedef struct cylinder_validation_ {
@@ -992,6 +995,8 @@
     /* These are borrowed references */
     PyArrayObject *center = (PyArrayObject *) PyTuple_GetItem(InputData, 0);
     PyObject *radius = (PyObject *) PyTuple_GetItem(InputData, 1);
+    PyObject *operiodic = PyTuple_GetItem(InputData, 2);
+    npy_float64 DW;
 
     /* This will get freed in the finalization of particle validation */
     sphere_validation *sv = (sphere_validation *)
@@ -1004,6 +1009,18 @@
 
     sv->radius = (npy_float64) PyFloat_AsDouble(radius);
 
+    sv->periodic = PyInt_AsLong(operiodic);
+    if(sv->periodic == 1) {
+      PyArrayObject *domain_left_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 3);
+      PyArrayObject *domain_right_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 4);
+      for (i = 0; i < 3; i++){
+        DW = (*(npy_float64*) PyArray_GETPTR1(domain_right_edge, i))
+           - (*(npy_float64*) PyArray_GETPTR1(domain_left_edge, i));
+        sv->period[i] = DW;
+        //fprintf(stderr, "Setting period equal to %lf\n", sv->period[i]);
+      }
+    }
+
     data->count_func = NULL;
     data->count_func_float = count_particles_sphere_FLOAT;
     data->count_func_double = count_particles_sphere_DOUBLE;
@@ -1523,6 +1540,7 @@
 
     double pradius;
 
+    if (vdata->periodic == 0) {
       for (ind = 0; ind < data->particles_to_check; ind++) {
         pradius = 0.0;
         tempr = (particle_position_x[ind] - vdata->center[0]); pradius += tempr*tempr;
@@ -1537,6 +1555,25 @@
           data->mask[ind] = 0;
         }
       }
+    } else {
+      for (ind = 0; ind < data->particles_to_check; ind++) {
+        pradius = 0.0;
+        tempr = fabs(particle_position_x[ind] - vdata->center[0]);
+        tempr = MIN(tempr, vdata->period[0] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_y[ind] - vdata->center[1]);
+        tempr = MIN(tempr, vdata->period[1] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_z[ind] - vdata->center[2]);
+        tempr = MIN(tempr, vdata->period[2] - tempr); pradius += tempr*tempr;
+        pradius = pow(pradius, 0.5);
+        if (pradius <= vdata->radius) {
+          if(data->update_count == 1) data->total_valid_particles++;
+          data->mask[ind] = 1;
+          n++;
+        } else {
+          data->mask[ind] = 0;
+        }
+      }
+    }
     return n;
 }
 
@@ -1560,6 +1597,7 @@
 
     double pradius;
 
+    if (vdata->periodic == 0) {
       for (ind = 0; ind < data->particles_to_check; ind++) {
         pradius = 0.0;
         tempr = (particle_position_x[ind] - vdata->center[0]); pradius += tempr*tempr;
@@ -1574,6 +1612,25 @@
           data->mask[ind] = 0;
         }
       }
+    } else {
+      for (ind = 0; ind < data->particles_to_check; ind++) {
+        pradius = 0.0;
+        tempr = fabs(particle_position_x[ind] - vdata->center[0]);
+        tempr = MIN(tempr, vdata->period[0] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_y[ind] - vdata->center[1]);
+        tempr = MIN(tempr, vdata->period[1] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_z[ind] - vdata->center[2]);
+        tempr = MIN(tempr, vdata->period[2] - tempr); pradius += tempr*tempr;
+        pradius = pow(pradius, 0.5);
+        if (pradius <= vdata->radius) {
+          if(data->update_count == 1) data->total_valid_particles++;
+          data->mask[ind] = 1;
+          n++;
+        } else {
+          data->mask[ind] = 0;
+        }
+      }
+    }
     return n;
 }
 
@@ -1597,6 +1654,7 @@
 
     long double pradius;
 
+    if (vdata->periodic == 0) {
       for (ind = 0; ind < data->particles_to_check; ind++) {
         pradius = 0.0;
         tempr = (particle_position_x[ind] - vdata->center[0]); pradius += tempr*tempr;
@@ -1611,6 +1669,25 @@
           data->mask[ind] = 0;
         }
       }
+    } else {
+      for (ind = 0; ind < data->particles_to_check; ind++) {
+        pradius = 0.0;
+        tempr = fabs(particle_position_x[ind] - vdata->center[0]);
+        tempr = MIN(tempr, vdata->period[0] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_y[ind] - vdata->center[1]);
+        tempr = MIN(tempr, vdata->period[1] - tempr); pradius += tempr*tempr;
+        tempr = fabs(particle_position_z[ind] - vdata->center[2]);
+        tempr = MIN(tempr, vdata->period[2] - tempr); pradius += tempr*tempr;
+        pradius = pow(pradius, 0.5);
+        if (pradius <= vdata->radius) {
+          if(data->update_count == 1) data->total_valid_particles++;
+          data->mask[ind] = 1;
+          n++;
+        } else {
+          data->mask[ind] = 0;
+        }
+      }
+    }
     return n;
 }
 


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Fri Mar 11 10:26:28 2011 -0500
@@ -1118,7 +1118,7 @@
         self._barrier()
         # We use old-school pickling here on the assumption the arrays are
         # relatively small ( < 1e7 elements )
-        if isinstance(data, na.ndarray):
+        if isinstance(data, na.ndarray) and data.dtype != na.bool:
             tr = na.zeros_like(data)
             if not data.flags.c_contiguous: data = data.copy()
             MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)


--- a/yt/utilities/performance_counters.py	Thu Mar 03 21:08:53 2011 -0500
+++ b/yt/utilities/performance_counters.py	Fri Mar 11 10:26:28 2011 -0500
@@ -100,3 +100,36 @@
 
 yt_counters = PerformanceCounters()
 time_function = yt_counters.call_func
+
+
+class ProfilingController(object):
+    def __init__(self):
+        self.profilers = {}
+
+    def profile_function(self, function_name):
+        def wrapper(func):
+            try:
+                import cProfile
+            except ImportError:
+                return func
+            my_prof = cProfile.Profile()
+            self.profilers[function_name] = my_prof
+            @wraps(func)
+            def run_in_profiler(*args, **kwargs):
+                my_prof.enable()
+                result = func(*args, **kwargs)
+                my_prof.disable()
+                return result
+            return run_in_profiler
+        return wrapper
+
+    def write_out(self, filename_prefix):
+        if ytcfg.getboolean("yt","__parallel"):
+            pfn = "%s_%03i_%03i" % (filename_prefix,
+                     ytcfg.getint("yt", "__parallel_rank"),
+                    ytcfg.getint("yt", "__parallel_size"))
+        else:
+            pfn = "%s" % (filename_prefix)
+        for n, p in sorted(self.profilers.items()):
+            fn = "%s_%s.cprof" % (pfn, n)
+            mylog.info("Dumping %s into %s", n, fn)
+            p.dump_stats(fn)


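The new ProfilingController wraps each registered function in its own cProfile.Profile and dumps one .cprof file per function, stamping MPI rank and size into the prefix when running in parallel. A serial usage sketch (the profiled function is a placeholder):

    from yt.utilities.performance_counters import ProfilingController

    profiler = ProfilingController()

    @profiler.profile_function("checksum")
    def checksum():
        # Placeholder workload to profile.
        return sum(i * i for i in xrange(1000000))

    checksum()
    profiler.write_out("profile")  # writes profile_checksum.cprof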
http://bitbucket.org/yt_analysis/yt/changeset/a22acab0884c/
changeset:   r3824:a22acab0884c
branch:      yt
user:        brittonsmith
date:        2011-03-13 19:38:39
summary:     Reorganized some cosmological density fields.
affected #:  2 files (2.5 KB)

--- a/yt/data_objects/universal_fields.py	Fri Mar 11 10:26:28 2011 -0500
+++ b/yt/data_objects/universal_fields.py	Sun Mar 13 14:38:39 2011 -0400
@@ -53,7 +53,8 @@
      sigma_thompson, \
      clight, \
      kboltz, \
-     G
+     G, \
+     rho_crit_now
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -388,6 +389,40 @@
     return (data['Density'] + data['Dark_Matter_Density'])
 add_field("Matter_Density",function=_Matter_Density,units=r"\rm{g}/\rm{cm^3}")
 
+def _ComovingDensity(field, data):
+    ef = (1.0 + data.pf.current_redshift)**3.0
+    return data["Density"]/ef
+add_field("ComovingDensity", function=_ComovingDensity, units=r"\rm{g}/\rm{cm}^3")
+
+# This is rho_total / rho_cr(z).
+def _Convert_Overdensity(data):
+    return 1 / (rho_crit_now * data.pf.hubble_constant**2 * 
+                (1+data.pf.current_redshift)**3)
+add_field("Overdensity",function=_Matter_Density,
+          convert_function=_Convert_Overdensity, units=r"")
+
+# This is (rho_total - <rho_total>) / <rho_total>
+def _DensityPerturbation(field, data):
+    rho_bar = rho_crit_now * data.pf.omega_matter * \
+        data.pf.hubble_constant**2 * \
+        (1.0 + data.pf.current_redshift)**3
+    return ((data['Matter_Density'] - rho_bar) / rho_bar)
+add_field("DensityPerturbation",function=_DensityPerturbation,units=r"")
+
+# This is rho_b / <rho_b>.
+def _Baryon_Overdensity(field, data):
+    return data['Density']
+def _Convert_Baryon_Overdensity(data):
+    if data.pf.parameters.has_key('omega_baryon_now'):
+        omega_baryon_now = data.pf.parameters['omega_baryon_now']
+    else:
+        omega_baryon_now = 0.0441
+    return 1 / (omega_baryon_now * rho_crit_now * 
+                (data.pf['CosmologyHubbleConstantNow']**2) * 
+                ((1+data.pf['CosmologyCurrentRedshift'])**3))
+add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
+          convert_function=_Convert_Baryon_Overdensity, units=r"")
+
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:


--- a/yt/frontends/enzo/fields.py	Fri Mar 11 10:26:28 2011 -0500
+++ b/yt/frontends/enzo/fields.py	Sun Mar 13 14:38:39 2011 -0400
@@ -34,7 +34,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh, rho_crit_now
+    mh
 import yt.utilities.amr_utils as amr_utils
 
 class EnzoFieldContainer(CodeFieldInfoContainer):
@@ -206,31 +206,6 @@
           function=_NumberDensity,
           convert_function=_ConvertNumberDensity)
 
-def _ComovingDensity(field,data):
-    ef = (1.0 + data.pf.current_redshift)**3.0
-    return data["Density"]/ef
-add_field("ComovingDensity", function=_ComovingDensity, units=r"\rm{g}/\rm{cm}^3")
-
-# This is rho_total / rho_cr(z).
-def Overdensity(field,data):
-    return (data['Density'] + data['Dark_Matter_Density']) / \
-        (rho_crit_now * (data.pf.hubble_constant**2) * ((1+data.pf.current_redshift)**3))
-add_field("Overdensity",function=Overdensity,units=r"")
-
-# This is rho_b / <rho_b>.
-def _Baryon_Overdensity(field, data):
-    return data['Density']
-def _Convert_Baryon_Overdensity(data):
-    if data.pf.parameters.has_key('omega_baryon_now'):
-        omega_baryon_now = data.pf.parameters['omega_baryon_now']
-    else:
-        omega_baryon_now = 0.0441
-    return 1 / (omega_baryon_now * rho_crit_now * 
-                (data.pf['CosmologyHubbleConstantNow']**2) * 
-                ((1+data.pf['CosmologyCurrentRedshift'])**3))
-add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
-          convert_function=_Convert_Baryon_Overdensity, units=r"")
-
 # Now we add all the fields that we want to control, but we give a null function
 # This is every Enzo field we can think of.  This will be installation-dependent,
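
For reference, the quantities these relocated fields compute, read directly off the definitions and conversion functions above (rho_cr,0 is rho_crit_now, h is pf.hubble_constant, z is pf.current_redshift):

    \mathrm{ComovingDensity} = \frac{\rho}{(1+z)^{3}}

    \mathrm{Overdensity} = \frac{\rho_{\rm gas} + \rho_{\rm DM}}{\rho_{\rm cr,0}\,h^{2}\,(1+z)^{3}}

    \mathrm{DensityPerturbation} = \frac{\rho_{\rm m} - \bar\rho_{\rm m}}{\bar\rho_{\rm m}},
    \qquad \bar\rho_{\rm m} = \rho_{\rm cr,0}\,\Omega_{\rm m}\,h^{2}\,(1+z)^{3}

    \mathrm{Baryon\_Overdensity} = \frac{\rho_{\rm b}}{\Omega_{\rm b,0}\,\rho_{\rm cr,0}\,h^{2}\,(1+z)^{3}}

Baryon_Overdensity falls back to Omega_b,0 = 0.0441 when omega_baryon_now is not in the dataset parameters.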
 


http://bitbucket.org/yt_analysis/yt/changeset/17001aed4f8f/
changeset:   r3825:17001aed4f8f
branch:      yt
user:        brittonsmith
date:        2011-03-13 21:12:41
summary:     Added weak lensing convergence field.
affected #:  1 file (1.3 KB)

--- a/yt/data_objects/universal_fields.py	Sun Mar 13 14:38:39 2011 -0400
+++ b/yt/data_objects/universal_fields.py	Sun Mar 13 16:12:41 2011 -0400
@@ -35,6 +35,7 @@
 from yt.funcs import *
 
 from yt.utilities.amr_utils import CICDeposit_3
+from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
     ValidateDataField, \
@@ -54,7 +55,8 @@
      clight, \
      kboltz, \
      G, \
-     rho_crit_now
+     rho_crit_now, \
+     speed_of_light_cgs
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -401,7 +403,7 @@
 add_field("Overdensity",function=_Matter_Density,
           convert_function=_Convert_Overdensity, units=r"")
 
-# This is (rho_total - <rho_total>) / <rho_total>
+# This is (rho_total - <rho_total>) / <rho_total>.
 def _DensityPerturbation(field, data):
     rho_bar = rho_crit_now * data.pf.omega_matter * \
         data.pf.hubble_constant**2 * \
@@ -423,6 +425,29 @@
 add_field("Baryon_Overdensity", function=_Baryon_Overdensity, 
           convert_function=_Convert_Baryon_Overdensity, units=r"")
 
+# Weak lensing convergence.
+# Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
+def _convertConvergence(data):
+    if not pf.parameters.has_key('cosmology_calculator'):
+        pf.parameters['cosmology_calculator'] = Cosmology(
+            HubbleConstantNow=(100.*pf.hubble_constant),
+            OmegaMatterNow=pf.omega_matter, OmegaLambdaNow=pf.omega_lambda)
+    # observer to lens
+    DL = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        pf.parameters['observer_redshift'], pf.current_redshift)
+    # observer to source
+    DS = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        pf.parameters['observer_redshift'], pf.parameters['lensing_source_redshift'])
+    # lens to source
+    DLS = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        pf.current_redshift, pf.parameters['lensing_source_redshift'])
+    return (((DL * DLS) / DS) * (1.5e14 * pf.omega_matter * 
+                                (pf.hubble_constant / speed_of_light_cgs)**2 *
+                                (1 + pf.current_redshift)))
+add_field("WeakLensingConvergence", function=_DensityPerturbation, 
+          convert_function=_convertConvergence, 
+          projection_conversion='mpccm')
+
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:
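
For reference, the conversion above appears to implement the convergence prefactor of Eqn 4 of Metzler, White, & Loken (2001), applied to the projected DensityPerturbation:

    \kappa = \frac{3}{2}\,\Omega_{\rm m}\left(\frac{H_{0}}{c}\right)^{2}
    (1 + z_{\rm l})\,\frac{D_{\rm l}\,D_{\rm ls}}{D_{\rm s}}
    \int \delta\,\mathrm{d}\chi

where z_l is the lens (slice) redshift and the D's are the angular diameter distances computed above. Reading off the units, the hard-coded 1.5e14 looks like (3/2)(100 km/s)^2 in cgs, so that together with (hubble_constant / speed_of_light_cgs)^2 it supplies (3/2)(H0/c)^2 per Mpc^2, with the 'mpccm' projection conversion providing the comoving path length d\chi in Mpc; that reading is an inference, not stated in the commit.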


http://bitbucket.org/yt_analysis/yt/changeset/8bcffa1367ab/
changeset:   r3826:8bcffa1367ab
branch:      yt
user:        brittonsmith
date:        2011-03-14 20:05:14
summary:     Added ability to add extra parameters to pfs for light cones and
fixed some issues with weak lensing convergence field.
affected #:  2 files (326 bytes)

--- a/yt/analysis_modules/light_cone/light_cone.py	Sun Mar 13 16:12:41 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 15:05:14 2011 -0400
@@ -47,7 +47,7 @@
                  final_redshift=0.0, observer_redshift=0.0,
                  field_of_view_in_arcminutes=600.0, image_resolution_in_arcseconds=60.0, 
                  use_minimum_datasets=True, deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
-                 output_dir='LC', output_prefix='LightCone', **kwargs):
+                 set_parameters=None, output_dir='LC', output_prefix='LightCone', **kwargs):
         """
         Initialize a LightCone object.
         :param initial_redshift (float): the initial (highest) redshift for the light cone.  Default: 1.0.
@@ -67,6 +67,7 @@
                the projection axis and center.  This was invented to allow light cones with thin slices to 
                sample coherent large scale structure, but in practice does not work so well.  Try setting 
                this parameter to 1 and see what happens.  Default: 0.0.
+        :param set_parameters (dict): dictionary of parameters to attach to pf.parameters.  Default: None.
         :param output_dir (str): the directory in which images and data files will be written.  Default: 'LC'.
         :param output_prefix (str): the prefix of all images and data files.  Default: 'LightCone'.
         """
@@ -79,6 +80,7 @@
         self.use_minimum_datasets = use_minimum_datasets
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
+        self.set_parameters = set_parameters
         self.output_dir = output_dir
         self.output_prefix = output_prefix
 
@@ -269,6 +271,7 @@
                 name = "%s%s_%s_%04d_%04d" % (self.output_dir, self.output_prefix,
                                               node, q, len(self.light_cone_solution))
             output['object'] = load(output['filename'])
+            output['object'].parameters.update(self.set_parameters)
             frb = LightConeProjection(output, field, self.pixels, weight_field=weight_field,
                                       save_image=save_slice_images,
                                       name=name, node=node, **kwargs)


--- a/yt/data_objects/universal_fields.py	Sun Mar 13 16:12:41 2011 -0400
+++ b/yt/data_objects/universal_fields.py	Mon Mar 14 15:05:14 2011 -0400
@@ -428,22 +428,22 @@
 # Weak lensing convergence.
 # Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
 def _convertConvergence(data):
-    if not pf.parameters.has_key('cosmology_calculator'):
-        pf.parameters['cosmology_calculator'] = Cosmology(
-            HubbleConstantNow=(100.*pf.hubble_constant),
-            OmegaMatterNow=pf.omega_matter, OmegaLambdaNow=pf.omega_lambda)
+    if not data.pf.parameters.has_key('cosmology_calculator'):
+        data.pf.parameters['cosmology_calculator'] = Cosmology(
+            HubbleConstantNow=(100.*data.pf.hubble_constant),
+            OmegaMatterNow=data.pf.omega_matter, OmegaLambdaNow=data.pf.omega_lambda)
     # observer to lens
-    DL = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        pf.parameters['observer_redshift'], pf.current_redshift)
+    DL = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        data.pf.parameters['observer_redshift'], data.pf.current_redshift)
     # observer to source
-    DS = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        pf.parameters['observer_redshift'], pf.parameters['lensing_source_redshift'])
+    DS = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        data.pf.parameters['observer_redshift'], data.pf.parameters['lensing_source_redshift'])
     # lens to source
-    DLS = pf.parameters['cosmology_calculator'].AngularDiameterDistance(
-        pf.current_redshift, pf.parameters['lensing_source_redshift'])
-    return (((DL * DLS) / DS) * (1.5e14 * pf.omega_matter * 
-                                (pf.hubble_constant / speed_of_light_cgs)**2 *
-                                (1 + pf.current_redshift)))
+    DLS = data.pf.parameters['cosmology_calculator'].AngularDiameterDistance(
+        data.pf.current_redshift, data.pf.parameters['lensing_source_redshift'])
+    return (((DL * DLS) / DS) * (1.5e14 * data.pf.omega_matter * 
+                                (data.pf.hubble_constant / speed_of_light_cgs)**2 *
+                                (1 + data.pf.current_redshift)))
 add_field("WeakLensingConvergence", function=_DensityPerturbation, 
           convert_function=_convertConvergence, 
           projection_conversion='mpccm')
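
A minimal usage sketch, not part of the commit: the conversion above reads 'observer_redshift' and 'lensing_source_redshift' out of pf.parameters, which the new set_parameters hook in the LightCone constructor can attach. The parameter file name, seed, and redshifts here are hypothetical, and calculate_light_cone_solution is assumed from the LightCone API of this era:

    from yt.analysis_modules.light_cone.light_cone import LightCone

    lc = LightCone("SimulationParameterFile.par",   # hypothetical Enzo parameter file
                   initial_redshift=0.4, final_redshift=0.0,
                   observer_redshift=0.0,
                   set_parameters={'observer_redshift': 0.0,
                                   'lensing_source_redshift': 0.4})
    lc.calculate_light_cone_solution(seed=123456789)
    lc.project_light_cone('WeakLensingConvergence', save_slice_images=True)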


http://bitbucket.org/yt_analysis/yt/changeset/788d42b67438/
changeset:   r3827:788d42b67438
branch:      yt
user:        brittonsmith
date:        2011-03-14 20:35:25
summary:     Removed plot collection from light cone generator.
affected #:  2 files (4.6 KB)

--- a/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 15:05:14 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 15:35:25 2011 -0400
@@ -34,8 +34,6 @@
 from yt.config import ytcfg
 from yt.convenience import load
 from yt.utilities.cosmology import Cosmology
-from yt.visualization.plot_collection import \
-    PlotCollection
 
 from .common_n_volume import commonNVolume
 from .halo_mask import light_cone_halo_map, \
@@ -308,9 +306,6 @@
                     if weight_field is not None:
                         self.projection_weight_field_stack = [sum(self.projection_weight_field_stack)]
 
-            # Delete the plot collection now that the frb is deleted.
-            del output['pc']
-
             # Unless this is the last slice, delete the dataset object.
             # The last one will be saved to make the plot collection.
             if (q < len(self.light_cone_solution) - 1):
@@ -344,17 +339,6 @@
                 else:
                     mylog.error("No halo mask loaded, call get_halo_mask.")
 
-            # Make a plot collection for the light cone projection.
-            center = [0.5 * (self.light_cone_solution[-1]['object'].parameters['DomainLeftEdge'][w] + 
-                             self.light_cone_solution[-1]['object'].parameters['DomainRightEdge'][w])
-                      for w in range(self.light_cone_solution[-1]['object'].parameters['TopGridRank'])]
-            pc = PlotCollection(self.light_cone_solution[-1]['object'], center=center)
-            pc.add_fixed_resolution_plot(frb, field, **kwargs)
-            pc.save(filename)
-
-            # Return the plot collection so the user can remake the plot if they want.
-            return pc
-
     def rerandomize_light_cone_solution(self, newSeed, recycle=True, filename=None):
         """
         When making a projection for a light cone, only randomizations along the line of sight make any 


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Mon Mar 14 15:05:14 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Mon Mar 14 15:35:25 2011 -0400
@@ -30,8 +30,6 @@
 from yt.config import ytcfg
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
-from yt.visualization.plot_collection import \
-    PlotCollection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
 
@@ -61,10 +59,6 @@
                             lightConeSlice['object'].parameters['DomainLeftEdge'][q]) \
                          for q in range(lightConeSlice['object'].parameters['TopGridRank'])]
 
-    # Make the plot collection and put it in the slice so we can delete it cleanly in the same scope 
-    # as where the frb will be deleted.
-    lightConeSlice['pc'] = PlotCollection(lightConeSlice['object'], center=region_center)
-
     # 1. The Depth Problem
     # Use coordinate field cut in line of sight to cut projection to proper depth.
     if field_cuts is None:
@@ -90,11 +84,9 @@
         these_field_cuts.append(cut_mask)
 
     # Make projection.
-    lightConeSlice['pc'].add_projection(field, lightConeSlice['ProjectionAxis'], 
-                                        weight_field=weight_field, 
-                                        field_parameters=dict(field_cuts=these_field_cuts, 
-                                                              node_name=node_name),
-                                        **kwargs)
+    proj = lightConeSlice['object'].h.proj(lightConeSlice['ProjectionAxis'], field, 
+                                           weight_field, center=region_center, 
+                                           field_cuts=these_field_cuts, node_name=node_name)
 
     # If parallel: all the processes have the whole projection object, but we only need to do the tiling, shifting, and cutting once.
     if ytcfg.getint("yt", "__parallel_rank") == 0:
@@ -103,30 +95,24 @@
         # Tile projection to specified width.
 
         # Original projection data.
-        original_px = copy.deepcopy(lightConeSlice['pc'].plots[0].data['px'])
-        original_py = copy.deepcopy(lightConeSlice['pc'].plots[0].data['py'])
-        original_pdx = copy.deepcopy(lightConeSlice['pc'].plots[0].data['pdx'])
-        original_pdy = copy.deepcopy(lightConeSlice['pc'].plots[0].data['pdy'])
-        original_field = copy.deepcopy(lightConeSlice['pc'].plots[0].data[field])
-        original_weight_field = copy.deepcopy(lightConeSlice['pc'].plots[0].data['weight_field'])
+        original_px = copy.deepcopy(proj['px'])
+        original_py = copy.deepcopy(proj['py'])
+        original_pdx = copy.deepcopy(proj['pdx'])
+        original_pdy = copy.deepcopy(proj['pdy'])
+        original_field = copy.deepcopy(proj[field])
+        original_weight_field = copy.deepcopy(proj['weight_field'])
 
         # Copy original into offset positions to make tiles.
         for x in range(int(na.ceil(lightConeSlice['WidthBoxFraction']))):
             for y in range(int(na.ceil(lightConeSlice['WidthBoxFraction']))):
                 if ((x + y) > 0):
-                    lightConeSlice['pc'].plots[0].data['px'] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data['px'], original_px+x])
-                    lightConeSlice['pc'].plots[0].data['py'] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data['py'], original_py+y])
-                    lightConeSlice['pc'].plots[0].data['pdx'] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data['pdx'], original_pdx])
-                    lightConeSlice['pc'].plots[0].data['pdy'] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data['pdy'], original_pdy])
-                    lightConeSlice['pc'].plots[0].data[field] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data[field], original_field])
-                    lightConeSlice['pc'].plots[0].data['weight_field'] = \
-                        na.concatenate([lightConeSlice['pc'].plots[0].data['weight_field'], 
-                                        original_weight_field])
+                    proj['px'] = na.concatenate([proj['px'], original_px+x])
+                    proj['py'] = na.concatenate([proj['py'], original_py+y])
+                    proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
+                    proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
+                    proj[field] = na.concatenate([proj[field], original_field])
+                    proj['weight_field'] = na.concatenate([proj['weight_field'], 
+                                                           original_weight_field])
 
         # Delete originals.
         del original_px
@@ -144,86 +130,74 @@
         del offset[lightConeSlice['ProjectionAxis']]
 
         # Shift x and y positions.
-        lightConeSlice['pc'].plots[0]['px'] -= offset[0]
-        lightConeSlice['pc'].plots[0]['py'] -= offset[1]
+        proj['px'] -= offset[0]
+        proj['py'] -= offset[1]
 
         # Wrap off-edge cells back around to other side (periodic boundary conditions).
-        lightConeSlice['pc'].plots[0]['px'][lightConeSlice['pc'].plots[0]['px'] < 0] += \
-            na.ceil(lightConeSlice['WidthBoxFraction'])
-        lightConeSlice['pc'].plots[0]['py'][lightConeSlice['pc'].plots[0]['py'] < 0] += \
-            na.ceil(lightConeSlice['WidthBoxFraction'])
+        proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['WidthBoxFraction'])
+        proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['WidthBoxFraction'])
 
         # After shifting, some cells have fractional coverage on both sides of the box.
         # Find those cells and make copies to be placed on the other side.
 
         # Cells hanging off the right edge.
-        add_x_right = lightConeSlice['pc'].plots[0]['px'] + \
-            0.5 * lightConeSlice['pc'].plots[0]['pdx'] > na.ceil(lightConeSlice['WidthBoxFraction'])
-        add_x_px = lightConeSlice['pc'].plots[0]['px'][add_x_right]
+        add_x_right = proj['px'] + 0.5 * proj['pdx'] > na.ceil(lightConeSlice['WidthBoxFraction'])
+        add_x_px = proj['px'][add_x_right]
         add_x_px -= na.ceil(lightConeSlice['WidthBoxFraction'])
-        add_x_py = lightConeSlice['pc'].plots[0]['py'][add_x_right]
-        add_x_pdx = lightConeSlice['pc'].plots[0]['pdx'][add_x_right]
-        add_x_pdy = lightConeSlice['pc'].plots[0]['pdy'][add_x_right]
-        add_x_field = lightConeSlice['pc'].plots[0][field][add_x_right]
-        add_x_weight_field = lightConeSlice['pc'].plots[0]['weight_field'][add_x_right]
+        add_x_py = proj['py'][add_x_right]
+        add_x_pdx = proj['pdx'][add_x_right]
+        add_x_pdy = proj['pdy'][add_x_right]
+        add_x_field = proj[field][add_x_right]
+        add_x_weight_field = proj['weight_field'][add_x_right]
         del add_x_right
 
         # Cells hanging off the left edge.
-        add_x_left = lightConeSlice['pc'].plots[0]['px'] - \
-            0.5 * lightConeSlice['pc'].plots[0]['pdx'] < 0
-        add2_x_px = lightConeSlice['pc'].plots[0]['px'][add_x_left]
+        add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
+        add2_x_px = proj['px'][add_x_left]
         add2_x_px += na.ceil(lightConeSlice['WidthBoxFraction'])
-        add2_x_py = lightConeSlice['pc'].plots[0]['py'][add_x_left]
-        add2_x_pdx = lightConeSlice['pc'].plots[0]['pdx'][add_x_left]
-        add2_x_pdy = lightConeSlice['pc'].plots[0]['pdy'][add_x_left]
-        add2_x_field = lightConeSlice['pc'].plots[0][field][add_x_left]
-        add2_x_weight_field = lightConeSlice['pc'].plots[0]['weight_field'][add_x_left]
+        add2_x_py = proj['py'][add_x_left]
+        add2_x_pdx = proj['pdx'][add_x_left]
+        add2_x_pdy = proj['pdy'][add_x_left]
+        add2_x_field = proj[field][add_x_left]
+        add2_x_weight_field = proj['weight_field'][add_x_left]
         del add_x_left
 
         # Cells hanging off the top edge.
-        add_y_right = lightConeSlice['pc'].plots[0]['py'] + \
-            0.5 * lightConeSlice['pc'].plots[0]['pdy'] > na.ceil(lightConeSlice['WidthBoxFraction'])
-        add_y_px = lightConeSlice['pc'].plots[0]['px'][add_y_right]
-        add_y_py = lightConeSlice['pc'].plots[0]['py'][add_y_right]
+        add_y_right = proj['py'] + 0.5 * proj['pdy'] > na.ceil(lightConeSlice['WidthBoxFraction'])
+        add_y_px = proj['px'][add_y_right]
+        add_y_py = proj['py'][add_y_right]
         add_y_py -= na.ceil(lightConeSlice['WidthBoxFraction'])
-        add_y_pdx = lightConeSlice['pc'].plots[0]['pdx'][add_y_right]
-        add_y_pdy = lightConeSlice['pc'].plots[0]['pdy'][add_y_right]
-        add_y_field = lightConeSlice['pc'].plots[0][field][add_y_right]
-        add_y_weight_field = lightConeSlice['pc'].plots[0]['weight_field'][add_y_right]
+        add_y_pdx = proj['pdx'][add_y_right]
+        add_y_pdy = proj['pdy'][add_y_right]
+        add_y_field = proj[field][add_y_right]
+        add_y_weight_field = proj['weight_field'][add_y_right]
         del add_y_right
 
         # Cells hanging off the bottom edge.
-        add_y_left = lightConeSlice['pc'].plots[0]['py'] - \
-            0.5 * lightConeSlice['pc'].plots[0]['pdy'] < 0
-        add2_y_px = lightConeSlice['pc'].plots[0]['px'][add_y_left]
-        add2_y_py = lightConeSlice['pc'].plots[0]['py'][add_y_left]
+        add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
+        add2_y_px = proj['px'][add_y_left]
+        add2_y_py = proj['py'][add_y_left]
         add2_y_py += na.ceil(lightConeSlice['WidthBoxFraction'])
-        add2_y_pdx = lightConeSlice['pc'].plots[0]['pdx'][add_y_left]
-        add2_y_pdy = lightConeSlice['pc'].plots[0]['pdy'][add_y_left]
-        add2_y_field = lightConeSlice['pc'].plots[0][field][add_y_left]
-        add2_y_weight_field = lightConeSlice['pc'].plots[0]['weight_field'][add_y_left]
+        add2_y_pdx = proj['pdx'][add_y_left]
+        add2_y_pdy = proj['pdy'][add_y_left]
+        add2_y_field = proj[field][add_y_left]
+        add2_y_weight_field = proj['weight_field'][add_y_left]
         del add_y_left
 
         # Add the hanging cells back to the projection data.
-        lightConeSlice['pc'].plots[0].data['px'] = \
-            na.concatenate([lightConeSlice['pc'].plots[0]['px'], add_x_px, add_y_px, 
-                            add2_x_px, add2_y_px])
-        lightConeSlice['pc'].plots[0].data['py'] = \
-            na.concatenate([lightConeSlice['pc'].plots[0]['py'], add_x_py, add_y_py, 
-                            add2_x_py, add2_y_py])
-        lightConeSlice['pc'].plots[0].data['pdx'] = \
-            na.concatenate([lightConeSlice['pc'].plots[0]['pdx'], add_x_pdx, add_y_pdx, 
-                            add2_x_pdx, add2_y_pdx])
-        lightConeSlice['pc'].plots[0].data['pdy'] = \
-            na.concatenate([lightConeSlice['pc'].plots[0]['pdy'], add_x_pdy, add_y_pdy, 
-                            add2_x_pdy, add2_y_pdy])
-        lightConeSlice['pc'].plots[0].data[field] = \
-            na.concatenate([lightConeSlice['pc'].plots[0][field], add_x_field, add_y_field, 
-                            add2_x_field, add2_y_field])
-        lightConeSlice['pc'].plots[0].data['weight_field'] = \
-            na.concatenate([lightConeSlice['pc'].plots[0]['weight_field'], 
-                            add_x_weight_field, add_y_weight_field,
-                            add2_x_weight_field, add2_y_weight_field])
+        proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px, 
+                                     add2_x_px, add2_y_px])
+        proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py, 
+                                     add2_x_py, add2_y_py])
+        proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx, 
+                                      add2_x_pdx, add2_y_pdx])
+        proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy, 
+                                      add2_x_pdy, add2_y_pdy])
+        proj[field] = na.concatenate([proj[field], add_x_field, add_y_field, 
+                                      add2_x_field, add2_y_field])
+        proj['weight_field'] = na.concatenate([proj['weight_field'], 
+                                               add_x_weight_field, add_y_weight_field,
+                                               add2_x_weight_field, add2_y_weight_field])
 
         # Delete original copies of hanging cells.
         del add_x_px, add_y_px, add2_x_px, add2_y_px
@@ -236,42 +210,30 @@
         # Tiles were made rounding up the width to the nearest integer.
         # Cut off the edges to get the specified width.
         # Cut in the x direction.
-        cut_x = lightConeSlice['pc'].plots[0].data['px'] - \
-            0.5 * lightConeSlice['pc'].plots[0].data['pdx'] < lightConeSlice['WidthBoxFraction']
-        lightConeSlice['pc'].plots[0].data['px'] = lightConeSlice['pc'].plots[0].data['px'][cut_x]
-        lightConeSlice['pc'].plots[0].data['py'] = lightConeSlice['pc'].plots[0].data['py'][cut_x]
-        lightConeSlice['pc'].plots[0].data['pdx'] = lightConeSlice['pc'].plots[0].data['pdx'][cut_x]
-        lightConeSlice['pc'].plots[0].data['pdy'] = lightConeSlice['pc'].plots[0].data['pdy'][cut_x]
-        lightConeSlice['pc'].plots[0].data[field] = lightConeSlice['pc'].plots[0].data[field][cut_x]
-        lightConeSlice['pc'].plots[0].data['weight_field'] = \
-            lightConeSlice['pc'].plots[0].data['weight_field'][cut_x]
+        cut_x = proj['px'] - 0.5 * proj['pdx'] < lightConeSlice['WidthBoxFraction']
+        proj['px'] = proj['px'][cut_x]
+        proj['py'] = proj['py'][cut_x]
+        proj['pdx'] = proj['pdx'][cut_x]
+        proj['pdy'] = proj['pdy'][cut_x]
+        proj[field] = proj[field][cut_x]
+        proj['weight_field'] = proj['weight_field'][cut_x]
         del cut_x
 
         # Cut in the y direction.
-        cut_y = lightConeSlice['pc'].plots[0].data['py'] - \
-            0.5 * lightConeSlice['pc'].plots[0].data['pdy'] < lightConeSlice['WidthBoxFraction']
-        lightConeSlice['pc'].plots[0].data['px'] = lightConeSlice['pc'].plots[0].data['px'][cut_y]
-        lightConeSlice['pc'].plots[0].data['py'] = lightConeSlice['pc'].plots[0].data['py'][cut_y]
-        lightConeSlice['pc'].plots[0].data['pdx'] = lightConeSlice['pc'].plots[0].data['pdx'][cut_y]
-        lightConeSlice['pc'].plots[0].data['pdy'] = lightConeSlice['pc'].plots[0].data['pdy'][cut_y]
-        lightConeSlice['pc'].plots[0].data[field] = lightConeSlice['pc'].plots[0].data[field][cut_y]
-        lightConeSlice['pc'].plots[0].data['weight_field'] = lightConeSlice['pc'].plots[0].data['weight_field'][cut_y]
+        cut_y = proj['py'] - 0.5 * proj['pdy'] < lightConeSlice['WidthBoxFraction']
+        proj['px'] = proj['px'][cut_y]
+        proj['py'] = proj['py'][cut_y]
+        proj['pdx'] = proj['pdx'][cut_y]
+        proj['pdy'] = proj['pdy'][cut_y]
+        proj[field] = proj[field][cut_y]
+        proj['weight_field'] = proj['weight_field'][cut_y]
         del cut_y
 
-        # Save an image if requested.
-        if save_image:
-            lightConeSlice['pc'].set_xlim(0, lightConeSlice['WidthBoxFraction'])
-            lightConeSlice['pc'].set_ylim(0, lightConeSlice['WidthBoxFraction'])
-            if add_redshift_label:
-                lightConeSlice['pc'].plots[-1].modify['text']((0.5, 0.03), "z = %.3f" % lightConeSlice['redshift'], 
-                                                              dict(color='black',size=50))
-            lightConeSlice['pc'].save(name)
-
         # Create fixed resolution buffer to return back to the light cone object.
         # These buffers will be stacked together to make the light cone.
-        frb = FixedResolutionBuffer(lightConeSlice['pc'].plots[0].data, \
-                                        (0, lightConeSlice['WidthBoxFraction'], 
-                                         0, lightConeSlice['WidthBoxFraction']),
+        frb = FixedResolutionBuffer(proj, 
+                                    (0, lightConeSlice['WidthBoxFraction'], 
+                                     0, lightConeSlice['WidthBoxFraction']),
                                     (pixels, pixels), antialias=False)
 
         return frb
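
A minimal sketch, not part of the commit, of the pattern that replaces the PlotCollection here: the projection object behaves like a dict of flat cell arrays, so the tiling, shifting, and periodic wrapping above are plain array operations, and a FixedResolutionBuffer resamples the result onto a regular grid. Assumes an already-loaded pf; the field, shift, bounds, and resolution are illustrative:

    from yt.visualization.fixed_resolution import FixedResolutionBuffer

    proj = pf.h.proj(0, 'Density', weight_field=None)  # project along x
    proj['px'] -= 0.25                       # shift cell centers, as above
    proj['px'][proj['px'] < 0] += 1.0        # wrap off-edge cells back into the box
    frb = FixedResolutionBuffer(proj, (0.0, 1.0, 0.0, 1.0),
                                (800, 800), antialias=False)
    image = frb['Density']                   # 800x800 array, ready to stack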


http://bitbucket.org/yt_analysis/yt/changeset/b25571847b21/
changeset:   r3828:b25571847b21
branch:      yt
user:        brittonsmith
date:        2011-03-14 21:09:10
summary:     Added image writing support with image_writer.
affected #:  2 files (252 bytes)

--- a/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 15:35:25 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 16:09:10 2011 -0400
@@ -34,11 +34,12 @@
 from yt.config import ytcfg
 from yt.convenience import load
 from yt.utilities.cosmology import Cosmology
+from yt.visualization.image_writer import write_image
 
 from .common_n_volume import commonNVolume
 from .halo_mask import light_cone_halo_map, \
     light_cone_halo_mask
-from .light_cone_projection import LightConeProjection
+from .light_cone_projection import _light_cone_projection
 
 class LightCone(EnzoSimulation):
     def __init__(self, EnzoParameterFile, initial_redshift=1.0, 
@@ -233,8 +234,8 @@
             del halo_mask_cube
 
     def project_light_cone(self, field, weight_field=None, apply_halo_mask=False, node=None,
-                           save_stack=True, save_slice_images=False, flatten_stack=False, photon_field=False,
-                           add_redshift_label=False, **kwargs):
+                           save_stack=True, save_slice_images=False, cmap_name='algae', 
+                           flatten_stack=False, photon_field=False):
         """
         Create projections for light cone, then add them together.
         :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
@@ -270,10 +271,12 @@
                                               node, q, len(self.light_cone_solution))
             output['object'] = load(output['filename'])
             output['object'].parameters.update(self.set_parameters)
-            frb = LightConeProjection(output, field, self.pixels, weight_field=weight_field,
-                                      save_image=save_slice_images,
-                                      name=name, node=node, **kwargs)
+            frb = _light_cone_projection(output, field, self.pixels, 
+                                         weight_field=weight_field, node=node)
             if ytcfg.getint("yt", "__parallel_rank") == 0:
+                if save_slice_images:
+                    write_image(frb[field], "%s_%s.png" % (name, field), cmap_name=cmap_name)
+
                 if photon_field:
                     # Decrement the flux by the luminosity distance. Assume field in frb is in erg/s/cm^2/Hz
                     co = Cosmology(HubbleConstantNow = (100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
@@ -287,7 +290,6 @@
                     mylog.info("Distance to slice = %e" % dL)
                     frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
                 if weight_field is not None:
                     # Data come back normalized by the weight field.
                     # Undo that so it can be added up for the light cone.
@@ -327,6 +329,10 @@
             # but replace the data with the full light cone projection data.
             frb.data[field] = lightConeProjection
 
+            # Write image.
+            if save_slice_images:
+                write_image(frb[field], "%s_%s.png" % (filename, field), cmap_name=cmap_name)
+
             # Write stack to hdf5 file.
             if save_stack:
                 self._save_light_cone_stack(field=field, weight_field=weight_field, filename=filename)


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Mon Mar 14 15:35:25 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Mon Mar 14 16:09:10 2011 -0400
@@ -34,9 +34,8 @@
     parallel_blocking_call
 
 @parallel_blocking_call
-def LightConeProjection(lightConeSlice, field, pixels, weight_field=None, 
-                        save_image=False, name="", node=None, field_cuts=None,
-                        add_redshift_label=False, **kwargs):
+def _light_cone_projection(lightConeSlice, field, pixels, weight_field=None, 
+                           save_image=False, node=None, field_cuts=None):
     "Create a single projection to be added into the light cone stack."
 
     # Use some projection parameters to seed random number generator to make unique node name.
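
A minimal sketch, not part of the commit, of the write_image call now used for slice images: it takes a 2D array (here a fixed resolution buffer field) straight to a PNG through a named colormap, replacing the PlotCollection round trip. The filename is illustrative; 'algae' is the default colormap named above:

    from yt.visualization.image_writer import write_image

    write_image(frb['Density'], 'LightCone_Density.png', cmap_name='algae')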


http://bitbucket.org/yt_analysis/yt/changeset/f09520ee2fd4/
changeset:   r3829:f09520ee2fd4
branch:      yt
user:        brittonsmith
date:        2011-03-14 21:29:22
summary:     Updated light cone docstring.
affected #:  1 file (73 bytes)

--- a/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 16:09:10 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Mon Mar 14 16:29:22 2011 -0400
@@ -247,6 +247,7 @@
         :param save_stack (bool): if True, the unflattened light cone data including each individual slice is written to 
                an hdf5 file.  Default: True.
         :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
+        :param cmap_name (str): color map for images.  Default: 'algae'.
         :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
                in order to save memory.  This is generally not necessary.  Default: False.
         :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2, where R 


http://bitbucket.org/yt_analysis/yt/changeset/899b12282a4f/
changeset:   r3830:899b12282a4f
branch:      yt
user:        brittonsmith
date:        2011-03-15 16:21:56
summary:     Eliminated use of PlotCollection in halo profiler.
affected #:  2 files (427 bytes)

--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Mar 14 16:29:22 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Mar 15 11:21:56 2011 -0400
@@ -46,8 +46,7 @@
     parallel_root_only
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
-from yt.visualization.plot_collection import \
-    PlotCollection
+from yt.visualization.image_writer import write_image
 
 PROFILE_RADIUS_THRESHOLD = 2
 
@@ -259,10 +258,11 @@
 
         self.profile_fields.append({'field':field, 'weight_field':weight_field, 'accumulation':accumulation})
 
-    def add_projection(self, field, weight_field=None):
+    def add_projection(self, field, weight_field=None, cmap='algae'):
         "Add a field for projection."
 
-        self.projection_fields.append({'field':field, 'weight_field':weight_field})
+        self.projection_fields.append({'field':field, 'weight_field':weight_field, 
+                                       'cmap': cmap})
 
     @parallel_blocking_call
     def make_profiles(self, filename=None, prefilters=None, **kwargs):
@@ -466,14 +466,13 @@
             return
 
         # Set resolution for fixed resolution output.
-        if save_cube:
-            if self.project_at_level == 'max':
-                proj_level = self.pf.h.max_level
-            else:
-                proj_level = int(self.project_at_level)
-            proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
-                (self.pf.parameters['RefineBy']**proj_level)
-            projectionResolution = int(self.projection_width / proj_dx)
+        if self.project_at_level == 'max':
+            proj_level = self.pf.h.max_level
+        else:
+            proj_level = int(self.project_at_level)
+        proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
+            (self.pf.parameters['RefineBy']**proj_level)
+        projectionResolution = int(self.projection_width / proj_dx)
 
         # Create output directory.
         if self.output_dir is not None:
@@ -515,8 +514,7 @@
             # Make projections.
             if not isinstance(axes, types.ListType): axes = list([axes])
             for w in axes:
-                # Create a plot collection.
-                pc = PlotCollection(self.pf, center=center)
+                projections = []
                 # YT projections do not follow the right-hand rule.
                 coords = range(3)
                 del coords[w]
@@ -524,12 +522,15 @@
                 y_axis = coords[1]
 
                 for hp in self.projection_fields:
-                    pc.add_projection(hp['field'], w, weight_field=hp['weight_field'], data_source=region)
+                    projections.append(self.pf.h.proj(w, hp['field'], 
+                                                      weight_field=hp['weight_field'], 
+                                                      data_source=region, center=halo['center'],
+                                                      serialize=False))
                 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:
                     pw = self.projection_width/self.pf.units[self.projection_width_units]
-                    shift_projections(self.pf, pc, halo['center'], center, w)
+                    #shift_projections(self.pf, projections, halo['center'], center, w)
                     # Projection has now been shifted to center of box.
                     proj_left = [center[x_axis]-0.5*pw, center[y_axis]-0.5*pw]
                     proj_right = [center[x_axis]+0.5*pw, center[y_axis]+0.5*pw]
@@ -537,30 +538,33 @@
                     proj_left = [leftEdge[x_axis], leftEdge[y_axis]]
                     proj_right = [rightEdge[x_axis], rightEdge[y_axis]]
 
-                pc.set_xlim(proj_left[0], proj_right[0])
-                pc.set_ylim(proj_left[1], proj_right[1])
+                # Save projection data to hdf5 file.
+                if save_cube or save_images:
+                    axis_labels = ['x', 'y', 'z']
 
-                # Save projection data to hdf5 file.
-                if save_cube:
-                    axis_labels = ['x', 'y', 'z']
-                    dataFilename = "%s/Halo_%04d_%s_data.h5" % \
+                    if save_cube:
+                        dataFilename = "%s/Halo_%04d_%s_data.h5" % \
                             (my_output_dir, halo['id'], axis_labels[w])
-                    mylog.info("Saving projection data to %s." % dataFilename)
+                        mylog.info("Saving projection data to %s." % dataFilename)
+                        output = h5py.File(dataFilename, "a")
 
-                    output = h5py.File(dataFilename, "a")
                     # Create fixed resolution buffer for each projection and write them out.
                     for e, hp in enumerate(self.projection_fields):
-                        frb = FixedResolutionBuffer(pc.plots[e].data, (proj_left[0], proj_right[0], 
-                                                                       proj_left[1], proj_right[1]),
-                                                          (projectionResolution, projectionResolution),
-                                                          antialias=False)
+                        frb = FixedResolutionBuffer(projections[e], (proj_left[0], proj_right[0], 
+                                                                     proj_left[1], proj_right[1]),
+                                                    (projectionResolution, projectionResolution),
+                                                    antialias=False)
                         dataset_name = "%s_%s" % (hp['field'], hp['weight_field'])
-                        if dataset_name in output.listnames(): del output[dataset_name]
-                        output.create_dataset(dataset_name, data=frb[hp['field']])
-                    output.close()
+                        if save_cube:
+                            if dataset_name in output.listnames(): del output[dataset_name]
+                            output.create_dataset(dataset_name, data=frb[hp['field']])
 
-                if save_images:
-                    pc.save("%s/Halo_%04d" % (my_output_dir, halo['id']), force_save=True)
+                        if save_images:
+                            filename = "%s/Halo_%04d_%s_%s.png" % (my_output_dir, halo['id'], 
+                                                                   dataset_name, axis_labels[w])
+                            write_image(frb[hp['field']], filename, cmap_name=hp['cmap'])
+
+                    if save_cube: output.close()
 
             del region
 
@@ -789,7 +793,7 @@
         else:
             os.mkdir(my_output_dir)
 
-def shift_projections(pf, pc, oldCenter, newCenter, axis):
+def shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.
     This is necessary when projecting a periodic region.
@@ -801,10 +805,10 @@
     del offset[axis]
     del width[axis]
 
-    for plot in pc.plots:
+    for plot in projections:
         # Get name of data field.
         other_fields = {'px':True, 'py':True, 'pdx':True, 'pdy':True, 'weight_field':True}
-        for pfield in plot.data.data.keys():
+        for pfield in plot.data.keys():
             if not(other_fields.has_key(pfield)):
                 field = pfield
                 break


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Mon Mar 14 16:29:22 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Tue Mar 15 11:21:56 2011 -0400
@@ -54,9 +54,9 @@
 
     mylog.info("Making projection at z = %f from %s." % (lightConeSlice['redshift'], lightConeSlice['filename']))
 
-    region_center = [0.5 * (lightConeSlice['object'].parameters['DomainRightEdge'][q] +
-                            lightConeSlice['object'].parameters['DomainLeftEdge'][q]) \
-                         for q in range(lightConeSlice['object'].parameters['TopGridRank'])]
+    region_center = [0.5 * (lightConeSlice['object'].domain_right_edge[q] +
+                            lightConeSlice['object'].domain_left_edge[q]) \
+                         for q in range(lightConeSlice['object'].pf.dimensionality)]
 
     # 1. The Depth Problem
     # Use coordinate field cut in line of sight to cut projection to proper depth.
@@ -230,9 +230,8 @@
 
         # Create fixed resolution buffer to return back to the light cone object.
         # These buffers will be stacked together to make the light cone.
-        frb = FixedResolutionBuffer(proj, 
-                                    (0, lightConeSlice['WidthBoxFraction'], 
-                                     0, lightConeSlice['WidthBoxFraction']),
+        frb = FixedResolutionBuffer(proj, (0, lightConeSlice['WidthBoxFraction'], 
+                                           0, lightConeSlice['WidthBoxFraction']),
                                     (pixels, pixels), antialias=False)
 
         return frb
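
A minimal usage sketch, not part of the commit, of the new per-projection colormap option; the dataset path is hypothetical and make_projections is assumed from the HaloProfiler API of this era:

    from yt.analysis_modules.halo_profiler.multi_halo_profiler import HaloProfiler

    hp = HaloProfiler("SimulationParameterFile.par")  # hypothetical dataset
    hp.add_projection('Density', weight_field=None, cmap='algae')
    hp.make_projections(save_cube=True, save_images=True)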


http://bitbucket.org/yt_analysis/yt/changeset/6de6b5c6a9ff/
changeset:   r3831:6de6b5c6a9ff
branch:      yt
user:        brittonsmith
date:        2011-03-15 17:29:36
summary:     Fixed light cone bug.
affected #:  1 file (3 bytes)

--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Tue Mar 15 11:21:56 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Tue Mar 15 12:29:36 2011 -0400
@@ -56,7 +56,7 @@
 
     region_center = [0.5 * (lightConeSlice['object'].domain_right_edge[q] +
                             lightConeSlice['object'].domain_left_edge[q]) \
-                         for q in range(lightConeSlice['object'].pf.dimensionality)]
+                         for q in range(lightConeSlice['object'].dimensionality)]
 
     # 1. The Depth Problem
     # Use coordinate field cut in line of sight to cut projection to proper depth.


http://bitbucket.org/yt_analysis/yt/changeset/0be5d6d20449/
changeset:   r3832:0be5d6d20449
branch:      yt
user:        brittonsmith
date:        2011-03-15 19:35:04
summary:     Merged.
affected #:  0 files (0 bytes)

--- a/yt/frontends/orion/data_structures.py	Tue Mar 15 12:29:36 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Tue Mar 15 14:35:04 2011 -0400
@@ -470,6 +470,7 @@
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["DualEnergyFormalism"] = 0 # always off.
         self.parameters["EOSType"] = -1 # default
+
         if self.fparameters.has_key("mu"):
             self.parameters["mu"] = self.fparameters["mu"]
 
@@ -504,6 +505,9 @@
                 self.fparameter_filename, 'probin')
         if os.path.isfile(self.fparameter_filename):
             self._parse_fparameter_file()
+            for param in self.fparameters:
+                if orion2enzoDict.has_key(param):
+                    self.parameters[orion2enzoDict[param]]=self.fparameters[param]
         # Let's read the file
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[ST_CTIME])
@@ -541,6 +545,20 @@
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.refine_by = self.parameters["RefineBy"]
 
+        if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
+            self.cosmological_simulation = 1
+            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
+            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
+            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
+            a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
+            line = a_file.readline().strip()
+            a_file.close()
+            self.parameters["CosmologyCurrentRedshift"] = 1/float(line) - 1
+            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
     def _parse_fparameter_file(self):
         """
         Parses the fortran parameter file for Orion. Most of this will
@@ -574,6 +592,7 @@
         n_fields = int(lines[1])
         self.current_time = float(lines[3+n_fields])
 
+
                 
     def _set_units(self):
         """


--- a/yt/frontends/orion/definitions.py	Tue Mar 15 12:29:36 2011 -0400
+++ b/yt/frontends/orion/definitions.py	Tue Mar 15 14:35:04 2011 -0400
@@ -63,7 +63,12 @@
 # throughout the code. key is Orion name, value is Enzo/yt equivalent
 orion2enzoDict = {"amr.n_cell": "TopGridDimensions",
                   "materials.gamma": "Gamma",
-                  "amr.ref_ratio": "RefineBy"
+                  "amr.ref_ratio": "RefineBy",
+                  "castro.use_comoving": "ComovingCoordinates",
+                  "castro.redshift_in": "CosmologyInitialRedshift",
+                  "comoving_OmL": "CosmologyOmegaLambdaNow",
+                  "comoving_OmM": "CosmologyOmegaMatterNow",
+                  "comoving_h": "CosmologyHubbleConstantNow"
                   }
 
 yt2orionFieldsDict = {}
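
For reference, the current_redshift recovered from the comoving_a file in the data_structures.py hunk above is just the scale-factor relation

    z = \frac{1}{a} - 1

with a read as the single float on the file's first line.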


--- a/yt/utilities/_amr_utils/Octree.pyx	Tue Mar 15 12:29:36 2011 -0400
+++ b/yt/utilities/_amr_utils/Octree.pyx	Tue Mar 15 14:35:04 2011 -0400
@@ -51,8 +51,8 @@
         self.val[i] += val[i]
     self.weight_val += weight_val
 
-cdef void OTN_refine(OctreeNode *self):
-    cdef int i, j, i1, j1
+cdef void OTN_refine(OctreeNode *self, int incremental = 0):
+    cdef int i, j, k, i1, j1
     cdef np.int64_t npos[3]
     cdef OctreeNode *node
     for i in range(2):
@@ -66,6 +66,7 @@
                             npos,
                             self.nvals, self.val, self.weight_val,
                             self.level + 1)
+    if incremental: return
     for i in range(self.nvals): self.val[i] = 0.0
     self.weight_val = 0.0
 
@@ -73,7 +74,7 @@
                         np.float64_t *val, np.float64_t weight_val,
                         int level):
     cdef OctreeNode *node
-    cdef int i, j
+    cdef int i, j, k
     node = <OctreeNode *> malloc(sizeof(OctreeNode))
     node.pos[0] = pos[0]
     node.pos[1] = pos[1]
@@ -92,7 +93,7 @@
     return node
 
 cdef void OTN_free(OctreeNode *node):
-    cdef int i, j
+    cdef int i, j, k
     for i in range(2):
         for j in range(2):
             for k in range(2):
@@ -106,10 +107,12 @@
     cdef np.int64_t po2[80]
     cdef OctreeNode ****root_nodes
     cdef np.int64_t top_grid_dims[3]
+    cdef int incremental
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
-                  int nvals):
-        cdef int i, j
+                  int nvals, int incremental = False):
+        cdef int i, j, k
+        self.incremental = incremental
         cdef OctreeNode *node
         cdef np.int64_t pos[3]
         cdef np.float64_t *vals = <np.float64_t *> alloca(
@@ -147,13 +150,15 @@
                  int level, np.int64_t pos[3],
                  np.float64_t *val,
                  np.float64_t weight_val):
-        cdef int i, j
+        cdef int i, j, k, L
         cdef OctreeNode *node
         node = self.find_on_root_level(pos, level)
         cdef np.int64_t fac
         for L in range(level):
+            if self.incremental:
+                OTN_add_value(node, val, weight_val)
             if node.children[0][0][0] == NULL:
-                OTN_refine(node)
+                OTN_refine(node, self.incremental)
             # Maybe we should use bitwise operators?
             fac = self.po2[level - L - 1]
             i = (pos[0] >= fac*(2*node.pos[0]+1))
@@ -165,7 +170,7 @@
     cdef OctreeNode *find_on_root_level(self, np.int64_t pos[3], int level):
         # We need this because the root level won't just have eight children
         # So we find on the root level, then we traverse the tree.
-        cdef np.int64_t i, j
+        cdef np.int64_t i, j, k
         i = <np.int64_t> (pos[0] / self.po2[level])
         j = <np.int64_t> (pos[1] / self.po2[level])
         k = <np.int64_t> (pos[2] / self.po2[level])
@@ -202,7 +207,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_all_from_level(self, int level, int count_only = 0):
-        cdef int i, j
+        cdef int i, j, k
         cdef int total = 0
         vals = []
         for i in range(self.top_grid_dims[0]):
@@ -214,7 +219,7 @@
         cdef np.ndarray[np.int64_t, ndim=2] npos
         cdef np.ndarray[np.float64_t, ndim=2] nvals
         cdef np.ndarray[np.float64_t, ndim=1] nwvals
-        npos = np.zeros( (total, 2), dtype='int64')
+        npos = np.zeros( (total, 3), dtype='int64')
         nvals = np.zeros( (total, self.nvals), dtype='float64')
         nwvals = np.zeros( total, dtype='float64')
         cdef np.int64_t curpos = 0
@@ -229,10 +234,11 @@
         return npos, nvals, nwvals
 
     cdef int count_at_level(self, OctreeNode *node, int level):
-        cdef int i, j
+        cdef int i, j, k
         # We only really return a non-zero, calculated value if we are at the
         # level in question.
         if node.level == level:
+            if self.incremental: return 1
             # We return 1 if there are no finer points at this level and zero
             # if there are
             return (node.children[0][0][0] == NULL)
@@ -249,9 +255,10 @@
                               np.int64_t *pdata,
                               np.float64_t *vdata,
                               np.float64_t *wdata):
-        cdef int i, j
+        cdef int i, j, k
         if node.level == level:
-            if node.children[0][0][0] != NULL: return 0
+            if node.children[0][0][0] != NULL and not self.incremental:
+                return 0
             for i in range(self.nvals):
                 vdata[self.nvals * curpos + i] = node.val[i]
             wdata[curpos] = node.weight_val
@@ -259,7 +266,7 @@
             pdata[curpos * 3 + 1] = node.pos[1]
             pdata[curpos * 3 + 2] = node.pos[2]
             return 1
-        if node.children[0][0] == NULL: return 0
+        if node.children[0][0][0] == NULL: return 0
         cdef np.int64_t added = 0
         for i in range(2):
             for j in range(2):
@@ -269,7 +276,7 @@
         return added
 
     def __dealloc__(self):
-        cdef int i, j
+        cdef int i, j, k
         for i in range(self.top_grid_dims[0]):
             for j in range(self.top_grid_dims[1]):
                 for k in range(self.top_grid_dims[2]):
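
A toy sketch in plain Python, not the Cython API and not part of the commit, of what the new incremental flag changes: values accumulate at every node along the insertion path, and refining a node no longer zeroes it, so interior nodes hold running totals instead of being emptied into their children. The copy-down of parent values into new children that OTN_refine performs in non-incremental mode is elided, and choose() is a hypothetical stand-in for the octant selection:

    class Node:
        def __init__(self, level):
            self.level, self.val, self.children = level, 0.0, None

    def choose(node):
        return 0  # hypothetical: index of the child octant containing the point

    def add_value(node, val, target_level, incremental):
        for _ in range(node.level, target_level):
            if incremental:
                node.val += val          # every node on the path keeps a running sum
            if node.children is None:
                node.children = [Node(node.level + 1) for _ in range(8)]
                if not incremental:
                    node.val = 0.0       # classic mode: refinement empties the parent
            node = node.children[choose(node)]
        node.val += val                  # the node at the target level gets the value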


--- a/yt/utilities/amr_utils.pyx	Tue Mar 15 12:29:36 2011 -0400
+++ b/yt/utilities/amr_utils.pyx	Tue Mar 15 14:35:04 2011 -0400
@@ -45,4 +45,5 @@
 include "_amr_utils/png_writer.pyx"
 include "_amr_utils/fortran_reader.pyx"
 include "_amr_utils/QuadTree.pyx"
+include "_amr_utils/Octree.pyx"
 include "_amr_utils/freetype_writer.pyx"


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Mar 15 12:29:36 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Mar 15 14:35:04 2011 -0400
@@ -152,7 +152,7 @@
             retval = func(self, *args, **kwargs)
             self._processing = False
         retval = MPI.COMM_WORLD.bcast(retval, root=self._owner)
-        MPI.COMM_WORLD.Barrier()
+        #MPI.COMM_WORLD.Barrier()
         return retval
     return single_proc_results
 
@@ -235,7 +235,7 @@
                 all_clear = 0
         else:
             all_clear = None
-        MPI.COMM_WORLD.Barrier()
+        #MPI.COMM_WORLD.Barrier()
         all_clear = MPI.COMM_WORLD.bcast(all_clear, root=0)
         if not all_clear: raise RuntimeError
     if parallel_capable: return root_only
@@ -632,14 +632,14 @@
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        self._barrier()
+        #self._barrier()
         if MPI.COMM_WORLD.rank == 0:
             for i in range(1,MPI.COMM_WORLD.size):
                 data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
         else:
             MPI.COMM_WORLD.send(data, dest=0, tag=0)
         data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
+        #self._barrier()
         return data
 
     @parallel_passthrough
@@ -1081,7 +1081,7 @@
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        self._barrier()
+        #self._barrier()
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data
 
@@ -1115,7 +1115,7 @@
 
     @parallel_passthrough
     def _mpi_allsum(self, data):
-        self._barrier()
+        #self._barrier()
         # We use old-school pickling here on the assumption the arrays are
         # relatively small ( < 1e7 elements )
         if isinstance(data, na.ndarray) and data.dtype != na.bool:

Repository URL: https://bitbucket.org/yt_analysis/yt/
