[yt-svn] commit/yt: ngoldbaum: Merged in jzuhone/yt/yt-3.0 (pull request #1009)

commits-noreply at bitbucket.org
Tue Jul 15 09:13:02 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/7e7ef2aaa715/
Changeset:   7e7ef2aaa715
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-15 18:12:52
Summary:     Merged in jzuhone/yt/yt-3.0 (pull request #1009)

Removing analysis modules as per Trello card
Affected #:  22 files
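
For scripts that imported from the aggregate module yt.analysis_modules.api
(deleted below), the pattern throughout this changeset is to import from each
analysis module's own api submodule instead. A minimal before/after sketch,
using only import paths that appear in this diff:

    # Before this changeset (aggregate API module, removed below):
    # from yt.analysis_modules.api import PPVCube, SZProjection, AbsorptionSpectrum

    # After: import from each module's own api, as the updated docs do.
    from yt.analysis_modules.ppv_cube.api import PPVCube
    from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection
    from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
    from yt.analysis_modules.cosmological_observation.api import LightRay, CosmologySplice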

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
+  "signature": "sha256:ba8b6a53571695ae1d0c236ad43875823746e979a329a9d35ab0a8b899cebbba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "from yt.analysis_modules.ppv_cube.api import PPVCube"
      ],
      "language": "python",
      "metadata": {},

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
+  "signature": "sha256:4745a15abb6512547b50280b92c22567f89255189fd968ca706ef7c39d48024f"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -91,7 +91,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import SZProjection\n",
+      "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
       "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -35,7 +35,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import AbsorptionSpectrum
+  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
 

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -26,7 +26,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightRay
+  from yt.analysis_modules.cosmological_observation.api import LightRay
   lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
                 'Enzo', 0.0, 0.1)
 

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -386,7 +386,7 @@
    from yt.mods import *
    from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
    from yt.utilities.cosmology import Cosmology
-   from yt.analysis_modules.api import *
+   from yt.analysis_modules.photon_simulator.api import *
    import aplpy
 
    R = 1000. # in kpc

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -10,7 +10,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import CosmologySplice
+  from yt.analysis_modules.cosmological_observation.api import CosmologySplice
   my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
   my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out')
 

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""
-API for yt.analysis_modules
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .absorption_spectrum.api import \
-    AbsorptionSpectrum
-
-from .coordinate_transformation.api import \
-    spherical_regrid
-
-from .cosmological_observation.api import \
-    CosmologySplice, \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones, \
-    LightRay
-
-from .halo_finding.api import \
-    Halo, \
-    HOPHalo, \
-    parallelHOPHalo, \
-    LoadedHalo, \
-    FOFHalo, \
-    HaloList, \
-    HOPHaloList, \
-    FOFHaloList, \
-    parallelHOPHaloList, \
-    LoadedHaloList, \
-    GenericHaloFinder, \
-    parallelHF, \
-    HOPHaloFinder, \
-    FOFHaloFinder, \
-    HaloFinder, \
-    LoadHaloes
-
-from .halo_mass_function.api import \
-    HaloMassFcn, \
-    TransferFunction, \
-    integrate_inf
-
-from .halo_merger_tree.api import \
-    DatabaseFunctions, \
-    MergerTree, \
-    MergerTreeConnect, \
-    Node, \
-    Link, \
-    MergerTreeDotOutput, \
-    MergerTreeTextOutput
-
-from .halo_profiler.api import \
-    VirialFilter, \
-    HaloProfiler, \
-    FakeProfile
-
-from .level_sets.api import \
-    identify_contours, \
-    Clump, \
-    find_clumps, \
-    get_lowest_clumps, \
-    write_clump_index, \
-    write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
-    _DistanceToMainClump, \
-    recursive_all_clumps, \
-    return_all_clumps, \
-    return_bottom_clumps, \
-    recursive_bottom_clumps, \
-    clump_list_sort
-
-from .radial_column_density.api import \
-    RadialColumnDensity
-
-from .spectral_integrator.api import \
-     add_xray_emissivity_field
-
-from .star_analysis.api import \
-    StarFormationRate, \
-    SpectrumBuilder
-
-from .two_point_functions.api import \
-    TwoPointFunctions, \
-    FcnSet
-
-from .sunyaev_zeldovich.api import SZProjection
-
-from .radmc3d_export.api import \
-    RadMC3DWriter
-
-from .particle_trajectories.api import \
-    ParticleTrajectories
-
-from .photon_simulator.api import \
-     PhotonList, \
-     EventList, \
-     SpectralModel, \
-     XSpecThermalModel, \
-     XSpecAbsorbModel, \
-     TableApecModel, \
-     TableAbsorbModel, \
-     PhotonModel, \
-     ThermalPhotonModel
-
-from .ppv_cube.api import \
-    PPVCube

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/coordinate_transformation/api.py
--- a/yt/analysis_modules/coordinate_transformation/api.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-API for coordinate_transformation
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .transforms import \
-    spherical_regrid

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/coordinate_transformation/setup.py
--- a/yt/analysis_modules/coordinate_transformation/setup.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('coordinate_transformation',
-        parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""
-Transformations between coordinate systems
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.funcs import *
-
-from yt.utilities.linear_interpolators import \
-    TrilinearFieldInterpolator
-
-def spherical_regrid(pf, nr, ntheta, nphi, rmax, fields,
-                     center=None, smoothed=True):
-    """
-    This function takes a parameter file (*pf*) along with the *nr*, *ntheta*
-    and *nphi* points to generate out to *rmax*, and it grids *fields* onto
-    those points and returns a dict.  *center* if supplied will be the center,
-    otherwise the most dense point will be chosen.  *smoothed* governs whether
-    regular covering grids or smoothed covering grids will be used.
-    """
-    mylog.warning("This code may produce some artifacts of interpolation")
-    mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
-    if center is None: center = pf.h.find_max("Density")[1]
-    fields = ensure_list(fields)
-    r,theta,phi = np.mgrid[0:rmax:nr*1j,
-                           0:np.pi:ntheta*1j,
-                           0:2*np.pi:nphi*1j]
-    new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
-    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
-    new_grid['z'] = r*np.cos(theta)             + center[2]
-    sphere = pf.sphere(center, rmax)
-    return arbitrary_regrid(new_grid, sphere, fields, smoothed)
-
-def arbitrary_regrid(new_grid, data_source, fields, smoothed=True):
-    """
-    This function accepts a dict of points 'x', 'y' and 'z' and a data source
-    from which to interpolate new points, along with a list of fields it needs
-    to regrid onto those xyz points.  It then returns interpolated points.
-    This has not been well-tested other than for regular spherical regridding.
-    """
-    fields = ensure_list(fields)
-    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
-    for field in fields:
-        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = np.argsort(data_source.grid_levels[:,0])
-    ng = len(data_source._grids)
-
-    for i,grid in enumerate(data_source._grids[grid_order][::-1]):
-        mylog.info("Regridding grid % 4i / % 4i (%s - %s)", i, ng, grid.id, grid.Level)
-        cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
-
-        # makes x0,x1,y0,y1,z0,z1
-        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
-
-        
-        # Now we figure out which of our points are inside this grid
-        # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
-        for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
-            # &= does a logical_and on the array
-            point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
-                         & ( new_grid[ax]     <= grid.RightEdge[i] ) )
-        point_ind &= (new_grid['handled'] == False) # only want unhandled points
-
-        # If we don't have any, we can just leave
-        if point_ind.sum() == 0: continue
-
-        # because of the funky way the interpolator takes points, we have to make a
-        # new dict of just the points inside this grid
-        point_grid = {'x' : new_grid['x'][point_ind],
-                      'y' : new_grid['y'][point_ind],
-                      'z' : new_grid['z'][point_ind]}
-
-        # Now we know which of the points in new_grid are inside this grid
-        for field in fields:
-            interpolator = TrilinearFieldInterpolator(
-                cg[field],bounds,['x','y','z'])
-            new_grid[field][point_ind] = interpolator(point_grid)
-
-        new_grid['handled'][point_ind] = True
-
-    mylog.info("Finished with %s dangling points",
-        new_grid['handled'].size - new_grid['handled'].sum())
-
-    return new_grid
-
-"""
-# The following will work to plot through different slices:
-
-import pylab
-for i in range(n_theta):
-    print "Doing % 3i / % 3i" % (i, n_theta)
-    pylab.clf()
-    ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
-    ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  np.log10(sph_grid[field][:,i,:]))
-    pylab.savefig("polar/latitude_%03i.png" % i)
-
-for i in range(n_phi):
-    print "Doing % 3i / % 3i" % (i, n_phi)
-    pylab.clf()
-    ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
-    ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  np.log10(sph_grid[field][:,:,i]))
-    pylab.savefig("polar/longitude_%03i.png" % i)
-"""

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-API for halo_profiler
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .halo_filters import \
-    VirialFilter
-
-from .multi_halo_profiler import \
-    HaloProfiler, \
-    FakeProfile, \
-    standard_fields

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-HaloProfiler re-centering functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-
-from yt.fields.local_fields import \
-    add_field
-
-centering_registry = {}
-
-def add_function(name):
-   def wrapper(func):
-       centering_registry[name] = func
-       return func
-   return wrapper
-
-#### Dark Matter Density ####
-
-@add_function("Min_Dark_Matter_Density")
-def find_minimum_dm_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("Max_Dark_Matter_Density")
-def find_maximum_dm_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("CoM_Dark_Matter_Density")
-def find_CoM_dm_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False, 
-                                                      use_particles=True,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Gas Density ####
-
-@add_function("Min_Gas_Density")
-def find_minimum_gas_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("Max_Gas_Density")
-def find_maximum_gas_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("CoM_Gas_Density")
-def find_CoM_gas_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
-                                                      use_particles=False,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Total Density ####
-
-@add_function("Min_Total_Density")
-def find_minimum_total_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("Max_Total_Density")
-def find_maximum_total_density(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("CoM_Total_Density")
-def find_CoM_total_density(data):
-   dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
-                                                      use_particles=True,
-                                                      preload=False)
-   return (dc_x, dc_y, dc_z)
-
-#### Temperature ####
-
-@add_function("Min_Temperature")
-def find_minimum_temperature(data):
-    ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
-                                                              preload=False)
-    return (mx, my, mz)
-
-@add_function("Max_Temperature")
-def find_maximum_temperature(data):
-    ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
-                                                              preload=False)
-    return (mx, my, mz)
-
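
The deleted centering_methods.py above filled centering_registry through a
small decorator, so re-centering methods could be looked up by name. A
generic, self-contained sketch of that registry pattern; the example function
and its data layout are illustrative, not yt's:

    import numpy as np

    centering_registry = {}

    def add_function(name):
        def wrapper(func):
            centering_registry[name] = func  # register under the given name
            return func                      # leave the function itself unchanged
        return wrapper

    @add_function("max_density")
    def find_maximum_density(data):
        # Illustrative stand-in: coordinates of the densest cell, given a
        # dict of NumPy arrays keyed "density", "x", "y", "z".
        i = np.argmax(data["density"])
        return data["x"][i], data["y"][i], data["z"][i]

    # Callers then dispatch by name:
    # center = centering_registry["max_density"](data)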

diff -r 2e2d9a9bc75add24c98e8fc13217e3033985e1f3 -r 7e7ef2aaa7159717cda503f44cdcf379f8060aa2 yt/analysis_modules/halo_profiler/halo_filters.py
--- a/yt/analysis_modules/halo_profiler/halo_filters.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-Halo filters to be used with the HaloProfiler.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from copy import deepcopy
-import numpy as np
-
-from yt.funcs import *
-from yt.utilities.physical_constants import TINY
-
-def VirialFilter(profile, overdensity_field='ActualOverdensity',
-                 virial_overdensity=200., must_be_virialized=True,
-                 virial_filters=[['TotalMassMsun', '>=','1e14']],
-                 virial_quantities=['TotalMassMsun', 'RadiusMpc'],
-                 virial_index=None, use_log=False):
-    r"""Filter halos by virial quantities.
-    
-    Return values are a True or False whether the halo passed the filter, 
-    along with a dictionary of virial quantities for the fields specified in 
-    the virial_quantities keyword.  Thresholds for virial quantities are 
-    given with the virial_filters keyword in the following way:
-    [field, condition, value].
-    
-    This is typically used as part of a call to `add_halo_filter`.
-    
-    Parameters
-    ----------
-    overdensity_field : string
-        The field used for interpolation with the 
-        specified critical value given with 'virial_overdensity'.  
-        Default='ActualOverdensity'.
-    virial_overdensity : float
-        The value used to determine the outer radius of the virialized halo.
-        Default: 200.
-    must_be_virialized : bool
-        If no values in the profile are above the 
-        value of virial_overdensity, the halo does not pass the filter.  
-        Default: True.
-    virial_filters : array_like
-        Conditional filters based on virial quantities 
-        given in the following way: [field, condition, value].  
-        Default: [['TotalMassMsun', '>=','1e14']].
-    virial_quantities : array_like
-        Fields for which interpolated values should 
-        be calculated and returned.  Default: ['TotalMassMsun', 'RadiusMpc'].
-    virial_index : array_like
-        If given as a list, the index of the radial profile 
-        which is used for interpolation is placed here.  Default: None.
-    use_log : bool
-        If True, interpolation is done in log space.  
-        Default: False.
-    
-    Examples
-    --------
-    >>> hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True,
-                   overdensity_field='ActualOverdensity',
-                   virial_overdensity=200,
-                   virial_filters=[['TotalMassMsun','>=','1e14']],
-                   virial_quantities=['TotalMassMsun','RadiusMpc'])
-    
-    """
-
-    fields = deepcopy(virial_quantities)
-    if virial_filters is None: virial_filters = []
-    for vfilter in virial_filters:
-        if not vfilter[0] in fields:
-            fields.append(vfilter[0])
-    
-    overDensity = []
-    temp_profile = dict((field, []) for field in fields)
-
-    for q in range(len(profile[overdensity_field])):
-        good = True
-        if (profile[overdensity_field][q] != profile[overdensity_field][q]):
-            good = False
-            continue
-        for field in fields:
-            if (profile[field][q] != profile[field][q]):
-                good = False
-                break
-        if good:
-            overDensity.append(profile[overdensity_field][q])
-            for field in fields:
-                temp_profile[field].append(profile[field][q])
-
-    if use_log:
-        for field in temp_profile.keys():
-            temp_profile[field] = np.log10(np.clip(temp_profile[field], TINY,
-                                                   max(temp_profile[field])))
-
-    virial = dict((field, 0.0) for field in fields)
-
-    if (not (np.array(overDensity) >= virial_overdensity).any()) and \
-            must_be_virialized:
-        mylog.debug("This halo is not virialized!")
-        return [False, {}]
-
-    if (len(overDensity) < 2):
-        mylog.debug("Skipping halo with no valid points in profile.")
-        return [False, {}]
-
-    if (overDensity[1] <= virial_overdensity):
-        index = 0
-    elif (overDensity[-1] >= virial_overdensity):
-        index = -2
-    else:
-        for q in (np.arange(len(overDensity),0,-1)-1):
-            if (overDensity[q] < virial_overdensity) and (overDensity[q-1] >= virial_overdensity):
-                index = q - 1
-                break
-
-    if type(virial_index) is list:
-        virial_index.append(index)
-
-    for field in fields:
-        if (overDensity[index+1] - overDensity[index]) == 0:
-            mylog.debug("Overdensity profile has slope of zero.")
-            return [False, {}]
-        else:
-            slope = (temp_profile[field][index+1] - temp_profile[field][index]) / \
-                (overDensity[index+1] - overDensity[index])
-            value = slope * (virial_overdensity - overDensity[index]) + \
-                temp_profile[field][index]
-            virial[field] = value
-
-    if use_log:
-        for field in virial.keys():
-            virial[field] = np.power(10, virial[field])
-
-    for vfilter in virial_filters:
-        if eval("%s %s %s" % (virial[vfilter[0]],vfilter[1],vfilter[2])):
-            mylog.debug("(%s %s %s) returned True for %s." % \
-                            (vfilter[0],vfilter[1],vfilter[2],virial[vfilter[0]]))
-            continue
-        else:
-            mylog.debug("(%s %s %s) returned False for %s." % \
-                            (vfilter[0],vfilter[1],vfilter[2],virial[vfilter[0]]))
-            return [False, {}]
-
-    return [True, dict((("%s_%s" % (q, virial_overdensity)), virial[q])
-                       for q in virial_quantities)]
-
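
The deleted VirialFilter above locates the profile bin where the overdensity
crosses virial_overdensity and linearly interpolates each requested quantity
at that crossing. A stripped-down sketch of that interpolation step, on
synthetic profile data rather than the yt API:

    import numpy as np

    # Synthetic radial profile: overdensity falls with radius.
    overdensity = np.array([5000., 1200., 350., 180., 90.])
    radius_mpc  = np.array([0.05,  0.2,   0.5,  0.8,  1.2])
    virial_overdensity = 200.0

    # Find the bracketing bin, then interpolate the radius at the threshold,
    # mirroring the slope/value step in the removed code.
    i = np.where((overdensity[:-1] >= virial_overdensity) &
                 (overdensity[1:] < virial_overdensity))[0][0]
    slope = (radius_mpc[i+1] - radius_mpc[i]) / (overdensity[i+1] - overdensity[i])
    r_virial = radius_mpc[i] + slope * (virial_overdensity - overdensity[i])
    print("RadiusMpc_200 ~ %.3f" % r_virial)  # interpolated virial radius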

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


