[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Thu Aug 27 09:27:50 PDT 2015


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/f0f1ca8d9f17/
Changeset:   f0f1ca8d9f17
Branch:      yt
User:        ngoldbaum
Date:        2015-08-14 16:17:26+00:00
Summary:     Add support for particle fields to the [Min,Max]Location derived quantities
Affected #:  1 file

diff -r 603eab147248f1a165778c76208906201eb7baae -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -33,6 +33,13 @@
 
 derived_quantity_registry = {}
 
+def get_position_fields(field, data):
+    if field[0] in data.ds.particle_types:
+        position_fields = [(field[0], 'particle_position_%s' % d) for d in 'xyz']
+    else:
+        position_fields = ['x', 'y', 'z']
+    return position_fields
+
 class RegisteredDerivedQuantity(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -544,14 +551,15 @@
     def process_chunk(self, data, field):
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, -HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         maxi = -1
         if data[field].size > 0:
             maxi = np.argmax(data[field])
             ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in 'xyz']
+            mx, my, mz = [data[ax][maxi] for ax in position_fields]
         return (ma, maxi, mx, my, mz)
 
     def reduce_intermediate(self, values):
@@ -587,14 +595,15 @@
     def process_chunk(self, data, field):
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         mini = -1
         if data[field].size > 0:
             mini = np.argmin(data[field])
             ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in 'xyz']
+            mx, my, mz = [data[ax][mini] for ax in position_fields]
         return (ma, mini, mx, my, mz)
 
     def reduce_intermediate(self, values):

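With this change, the min/max location derived quantities can be queried with particle fields directly. A minimal sketch of the new behavior, assuming the IsolatedGalaxy sample dataset (used elsewhere in the docs) and its ('all', 'particle_mass') field:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # Mesh field: positions are drawn from the 'x', 'y', 'z' fields, as before.
    print(ad.quantities.max_location(("gas", "density")))

    # Particle field: positions are now drawn from 'particle_position_[xyz]'.
    print(ad.quantities.max_location(("all", "particle_mass")))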

https://bitbucket.org/yt_analysis/yt/commits/5215eda8d551/
Changeset:   5215eda8d551
Branch:      yt
User:        ngoldbaum
Date:        2015-08-20 18:43:24+00:00
Summary:     Merging, fixing conflicts
Affected #:  100 files

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -109,6 +109,16 @@
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
 
+Generating Spectra in Parallel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The spectrum generator can be run in parallel simply by following the
+procedures laid out in :ref:`parallel-computation`.  Spectrum generation
+is parallelized using a multi-level strategy: each absorption line is
+deposited by a different processor, and if the number of available
+processors is greater than the number of lines, the deposition of
+individual lines will be divided over multiple processors.
+
 Fitting an Absorption Spectrum
 ------------------------------
 

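A minimal parallel driver for the workflow described in the new docs section above, assuming an existing ray file named ``ray.h5`` (the name is a placeholder) and a working mpi4py install; run with e.g. ``mpirun -np 4 python parallel_spectrum.py``:

    # parallel_spectrum.py
    import yt
    yt.enable_parallelism()

    from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

    # lambda_min, lambda_max (Angstroms), number of bins
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    # H I Lyman-alpha: label, field, wavelength (A), f_value, gamma, atomic mass
    sp.add_line("HI Lya", "H_number_density", 1215.6700, 4.164e-01,
                6.265e+08, 1.00794)

    # njobs="auto" parallelizes over lines first and only splits the
    # deposition of individual lines when processors outnumber lines.
    lambda_bins, flux = sp.make_spectrum("ray.h5", output_file="spectrum.h5",
                                         line_list_file=None, njobs="auto")
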
diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -771,7 +771,7 @@
 
 .. code-block:: python
 
-   from yt.frontends.sph.definitions import gadget_field_specs
+   from yt.frontends.gadget.definitions import gadget_field_specs
    gadget_field_specs["my_field_def"] = my_field_def
 
 Please also feel free to issue a pull request with any new field
@@ -871,7 +871,7 @@
 ----------------
 
 See :ref:`loading-numpy-array` and
-:func:`~yt.frontends.sph.data_structures.load_amr_grids` for more detail.
+:func:`~yt.frontends.stream.data_structures.load_amr_grids` for more detail.
 
 It is possible to create a native yt dataset from a Python dictionary
 that describes a set of rectangular patches of data of possibly varying

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -610,10 +610,8 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.camera.MosaicFisheyeCamera
    ~yt.visualization.volume_rendering.camera.FisheyeCamera
    ~yt.visualization.volume_rendering.camera.MosaicCamera
-   ~yt.visualization.volume_rendering.camera.plot_allsky_healpix
    ~yt.visualization.volume_rendering.camera.PerspectiveCamera
    ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
    ~yt.visualization.volume_rendering.camera.StereoPairCamera

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -463,6 +463,35 @@
    s.annotate_streamlines('velocity_x', 'velocity_y')
    s.save()
 
+.. _annotate-line-integral-convolution:
+
+Overplot Line Integral Convolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: annotate_line_integral_convolution(self, field_x, field_y, \
+                                                 texture=None, kernellen=50., \
+                                                 lim=(0.5,0.6), cmap='binary', \
+                                                 alpha=0.8, const_alpha=False)
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.LineIntegralConvolutionCallback`.)
+
+   Add a line integral convolution to any plot, using the ``field_x`` and
+   ``field_y`` from the associated data. A white noise background is used
+   for ``texture`` by default. The bounds of ``lim``, in the range ``[0, 1]``,
+   apply upper and lower limits to the values of the line integral convolution
+   to enhance the visibility of the plot. When ``const_alpha=False``, alpha
+   is weighted spatially by the values of the line integral convolution;
+   otherwise a constant value of the given alpha is used.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   s.annotate_line_integral_convolution('velocity_x', 'velocity_y', lim=(0.5,0.65))
+   s.save()
+
 .. _annotate-text:
 
 Overplot Text

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,3 +7,9 @@
 where=yt
 exclude=answer_testing
 with-xunit=1
+
+[flake8]
+# if we include api.py files, we get tons of spurious "imported but unused" errors
+exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+max-line-length=999
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -70,15 +70,6 @@
     x = np.asarray(u).astype(np.float64)
     y = np.asarray(a).astype(np.float64)
 
-    w = np.array([0.462243670,   0.286675505,   0.109017206, 
-                  0.0248105209,  0.00324377334, 0.000228338636, 
-                  7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
-                  2.22939365e-13])
-
-    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
-                  2.25497400,  2.78880606,  3.34785457, 3.94476404, 
-                  4.60368245,  5.38748089])
-
     # Hummer's Chebyshev Coefficients
     c = ( 0.1999999999972224, -0.1840000000029998,   0.1558399999965025, 
          -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907, 
@@ -195,7 +186,6 @@
     ## conversions
     nu1 = speed_of_light_cgs / lam1           # line freq in Hz
     nudop = v_doppler / speed_of_light_cgs * nu1   # doppler width in Hz
-    lamdop = v_doppler / speed_of_light_cgs * lam1 # doppler width in Ang
 
     ## create wavelength
     if lambda_bins is None:

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -26,6 +26,10 @@
     boltzmann_constant_cgs, \
     speed_of_light_cgs
 from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    _get_comm, \
+    parallel_objects, \
+    parallel_root_only
 
 pyfits = _astropy.pyfits
 
@@ -108,9 +112,9 @@
                                     'normalization': normalization,
                                     'index': index})
 
-    def make_spectrum(self, input_file, output_file='spectrum.h5',
-                      line_list_file='lines.txt',
-                      use_peculiar_velocity=True):
+    def make_spectrum(self, input_file, output_file="spectrum.h5",
+                      line_list_file="lines.txt",
+                      use_peculiar_velocity=True, njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -119,11 +123,35 @@
 
         input_file : string
            path to input ray data.
-        output_file : string
-           path for output file.  File formats are chosen based on the filename extension.
-           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
-        use_peculiar_velocity : bool
+        output_file : optional, string
+           path for output file.  File formats are chosen based on the 
+           filename extension.  ``.h5`` for hdf5, ``.fits`` for fits, 
+           and everything else is ASCII.
+           Default: "spectrum.h5"
+        line_list_file : optional, string
+           path to the file in which the list of all deposited lines
+           will be saved.  If set to None, the line list will not
+           be saved.  Note that combining the line lists when running
+           in parallel can be quite slow, so it is recommended to set
+           this to None in parallel runs unless the line list is
+           actually needed.
+           Default: "lines.txt"
+        use_peculiar_velocity : optional, bool
            if True, include line of sight velocity for shifting lines.
+           Default: True
+        njobs : optional, int or "auto"
+           the number of process groups into which the loop over
+           absorption lines will be divided.  If set to -1, each 
+           absorption line will be deposited by exactly one processor.
+           If njobs is set to a value less than the total number of 
+           available processors (N), then the deposition of an 
+           individual line will be parallelized over (N / njobs)
+           processors.  If set to "auto", it will first try to 
+           parallelize over the list of lines and only parallelize 
+           the line deposition if there are more processors than
+           lines.  This is the optimal strategy for parallelizing 
+           spectrum generation.
+           Default: "auto"
         """
 
         input_fields = ['dl', 'redshift', 'temperature']
@@ -145,7 +173,12 @@
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
-        self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
+        if njobs == "auto":
+            comm = _get_comm(())
+            njobs = min(comm.size, len(self.line_list))
+        
+        self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
+                                    line_list_file is not None, njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
         self.flux_field = np.exp(-self.tau_field)
@@ -156,7 +189,8 @@
             self._write_spectrum_fits(output_file)
         else:
             self._write_spectrum_ascii(output_file)
-        self._write_spectrum_line_list(line_list_file)
+        if line_list_file is not None:
+            self._write_spectrum_line_list(line_list_file)
 
         del field_data
         return (self.lambda_bins, self.flux_field)
@@ -196,7 +230,8 @@
                 pbar.update(i)
             pbar.finish()
 
-    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity):
+    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
+                               save_line_list, njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
@@ -205,7 +240,7 @@
         # Widen wavelength window until optical depth reaches a max value at the ends.
         max_tau = 0.001
 
-        for line in self.line_list:
+        for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
             delta_lambda = line['wavelength'] * field_data['redshift']
             if use_peculiar_velocity:
@@ -238,7 +273,7 @@
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)
-            for i, lixel in enumerate(valid_lines):
+            for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
                 my_bin_ratio = spectrum_bin_ratio
                 while True:
                     lambda_bins, line_tau = \
@@ -261,7 +296,7 @@
                                           my_bin_ratio *
                                           width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
-                if line['label_threshold'] is not None and \
+                if save_line_list and line['label_threshold'] is not None and \
                         column_density[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
                         peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
@@ -280,6 +315,13 @@
             del column_density, delta_lambda, thermal_b, \
                 center_bins, width_ratio, left_index, right_index
 
+        comm = _get_comm(())
+        self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
+        if save_line_list:
+            self.spectrum_line_list = comm.par_combine_object(
+                self.spectrum_line_list, "cat", datatype="list")
+
+    @parallel_root_only
     def _write_spectrum_line_list(self, filename):
         """
         Write out list of spectral lines.
@@ -295,6 +337,7 @@
                                                 line['redshift'], line['v_pec']))
         f.close()
 
+    @parallel_root_only
     def _write_spectrum_ascii(self, filename):
         """
         Write spectrum to an ascii file.
@@ -307,6 +350,7 @@
                                     self.tau_field[i], self.flux_field[i]))
         f.close()
 
+    @parallel_root_only
     def _write_spectrum_fits(self, filename):
         """
         Write spectrum to a fits file.
@@ -318,6 +362,7 @@
         tbhdu = pyfits.BinTableHDU.from_columns(cols)
         tbhdu.writeto(filename, clobber=True)
 
+    @parallel_root_only
     def _write_spectrum_hdf5(self, filename):
         """
         Write spectrum to an hdf5 file.

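The pattern above -- ``parallel_objects`` to split the loop over lines, ``mpi_allreduce`` to sum the per-processor partial results, and ``parallel_root_only`` to guard the writers -- is the standard yt reduction idiom. A self-contained sketch of the first two pieces (``deposit_line`` is a hypothetical stand-in for the per-line work; in a serial run the loop and the allreduce degenerate to no-ops):

    import numpy as np
    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        _get_comm, parallel_objects

    def deposit_line(line_id, n_lambda=10000):
        # hypothetical stand-in for depositing one line's optical depth
        tau = np.zeros(n_lambda)
        tau[line_id::100] = 1.0
        return tau

    tau_field = np.zeros(10000)
    # njobs=-1: each line is handled by exactly one processor, as in the patch.
    for line_id in parallel_objects(range(8), njobs=-1):
        tau_field += deposit_line(line_id)

    # Sum the partial tau arrays across all processors.
    comm = _get_comm(())
    tau_field = comm.mpi_allreduce(tau_field, op="sum")
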
diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -141,7 +141,7 @@
             if flag and species=='lya' and min(yDatBounded)<.1:
                newLinesP=_large_flag_fit(xBounded,yDatBounded,
                         yFitBounded,z,speciesDict,
-                        minSize,minError)
+                        fitLim,minError)
 
             if np.size(newLinesP)> 0:
 
@@ -226,7 +226,7 @@
     """
 
     #Setup initial line guesses
-    if initP==None: #Regular fit
+    if initP is None: #Regular fit
         initP = [0,0,0] 
         if min(yDat)<.01: #Large lines get larger initial guess 
             initP[0] = speciesDict['init_N']*10**2
@@ -252,7 +252,7 @@
     #Values to proceed through first run
     errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
-    if errBound == None:
+    if errBound is None:
         errBound = len(yDat)*(max(1-yDat)*1E-2)**2
     else:
         errBound = errBound*len(yDat)
@@ -548,7 +548,7 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
-            if errBound == None:
+            if errBound is None:
                 errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
             else:
                 errBound = 10*errBound*len(yb)

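Several hunks in this merge replace ``== None`` with ``is None``. Beyond PEP 8 style, the identity check matters whenever the left-hand side can be a NumPy array, since ``==`` broadcasts element-wise; a small illustration (the exact behavior of ``== None`` on arrays varies across NumPy versions, so treat the first print as indicative):

    import numpy as np

    arr = np.zeros(3)
    print(arr == None)   # element-wise result (or a FutureWarning on old NumPy)
    print(arr is None)   # always a single, unambiguous bool: False
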
diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -14,10 +14,11 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import os
 
 from yt.convenience import \
     simulation
-from yt.funcs import *
+from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
 

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -24,8 +24,6 @@
     CosmologySplice
 from yt.convenience import \
     load
-from yt.utilities.cosmology import \
-    Cosmology
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/light_cone/setup.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/setup.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import copy
 import h5py
 import numpy as np
 

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/light_ray/setup.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/setup.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/cosmological_observation/setup.py
--- a/yt/analysis_modules/cosmological_observation/setup.py
+++ b/yt/analysis_modules/cosmological_observation/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -35,13 +35,13 @@
 
 import numpy as np
 import h5py
-import time
-import pdb
+import glob
+import os
+
 from yt.extern.six.moves import cPickle
-import glob
+from yt.extern.pykdtree import KDTree
+from yt.funcs import mylog, get_pbar
 
-from yt.funcs import *
-from yt.extern.pykdtree import KDTree
 import yt.extern.pydot as pydot
 
 # We don't currently use this, but we may again find a use for it in the
@@ -180,7 +180,7 @@
 
     def calculate_parentage_fractions(self, other_catalog, radius = 0.10):
         parentage_fractions = {}
-        if self.halo_positions == None or other_catalog.halo_positions == None:
+        if self.halo_positions is None or other_catalog.halo_positions is None:
             return parentage_fractions
         mylog.debug("Ball-tree query with radius %0.3e", radius)
         all_nearest = self.halo_kdtree.query_ball_tree(
@@ -320,7 +320,7 @@
         for redshift in self.redshifts.values():
             if redshift <= zrange[0] and redshift >= zrange[1]:
                 # some reverse lookup magic--assumes unique cycle/z pairs
-                cycle = [key for key,value in mt.redshifts.items() \
+                cycle = [key for key,value in self.redshifts.items() \
                          if value == redshift][0]
                 del self.redshifts[cycle]
 
@@ -568,7 +568,7 @@
             automatically. See GraphViz (e.g. "dot -v")
             for a list of available output formats.
         """
-        if filename == None: 
+        if filename is None:
             filename = "%s/tree_halo%5.5d.gv" % \
                         (self.FOF_directory, self.halonum)
         # Create the pydot graph object.
@@ -598,7 +598,6 @@
                     #      (lvl, br.halo_id, next_lvl, c[0], color)
                     
                     #fp.write(line)
-                    last_level = (ii,lvl)
         for ii,lvl in enumerate(sorted_lvl):
             npart_max = 0
             for br in self.levels[lvl]:

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -18,23 +18,22 @@
 import os
 
 from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     periodic_distance
+    periodic_distance
 from yt.data_objects.profiles import \
-     create_profile
+    create_profile
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray
 from yt.utilities.exceptions import \
-     YTSphereTooSmall
+    YTSphereTooSmall
 from yt.funcs import \
-     ensure_list, is_root
-from yt.utilities.exceptions import YTUnitConversionError
+    ensure_list
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.operator_registry import \
-     OperatorRegistry
+    OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
-     PhasePlot
+    PhasePlot
 
 callback_registry = OperatorRegistry()
     

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -138,7 +138,7 @@
         self.actions = []
         # fields to be written to the halo catalog
         self.quantities = []
-        if not self.halos_ds is None:
+        if self.halos_ds is not None:
             self.add_default_quantities()
 
     def add_callback(self, callback, *args, **kwargs):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -17,8 +17,6 @@
 
 from yt.analysis_modules.halo_finding.halo_objects import \
     FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalog.data_structures import \
-    HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
 from yt.units.dimensions import length

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
--- a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+++ b/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
@@ -1,10 +1,7 @@
 import os
 import sys
 
-from yt.analysis_modules.halo_analysis.api import \
-    HaloCatalog
 from yt.convenience import load
-from yt.testing import *
 from yt.utilities.answer_testing.framework import \
     FieldValuesTest, \
     requires_ds

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/fof/setup.py
--- a/yt/analysis_modules/halo_finding/fof/setup.py
+++ b/yt/analysis_modules/halo_finding/fof/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -17,21 +17,15 @@
 import h5py
 import math
 import numpy as np
-import random
-import sys
 import glob
 import os
 import os.path as path
 from functools import cmp_to_key
-from collections import defaultdict
 from yt.extern.six import add_metaclass
 from yt.extern.six.moves import zip as izip
 
 from yt.config import ytcfg
 from yt.funcs import mylog, ensure_dir_exists
-from yt.utilities.performance_counters import \
-    time_function, \
-    yt_counters
 from yt.utilities.math_utils import \
     get_rotation_matrix, \
     periodic_dist
@@ -39,7 +33,7 @@
     mass_sun_cgs, \
     TINY
 from yt.utilities.physical_ratios import \
-     rho_crit_g_cm3_h2
+    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -282,7 +276,7 @@
         return r.max()
 
     def __getitem__(self, key):
-        if ytcfg.getboolean("yt", "inline") == False:
+        if ytcfg.getboolean("yt", "inline") is False:
             return self.data[key][self.indices]
         else:
             return self.data[key][self.indices]
@@ -339,8 +333,6 @@
         if ('io','creation_time') in self.data.ds.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
                 data=self['creation_time'])
-        n = handle["/%s" % gn]
-        # set attributes on n
         self._processing = False
 
     def virial_mass(self, virial_overdensity=200., bins=300):
@@ -419,7 +411,7 @@
         """
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
-        if (over == True).any():
+        if over.any():
             vir_bin = max(np.arange(bins + 1)[over])
             return vir_bin
         else:
@@ -1233,7 +1225,6 @@
         fglob = path.join(basedir, 'halos_%d.*.bin' % n)
         files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        Jc = 1.0
         length = 1.0 / ds['Mpchcm']
         conv = dict(pos = np.array([length, length, length,
                                     1, 1, 1]), # to unitary

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/hop/setup.py
--- a/yt/analysis_modules/halo_finding/hop/setup.py
+++ b/yt/analysis_modules/halo_finding/hop/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -16,23 +16,21 @@
 
 from yt.config import ytcfg
 from yt.data_objects.time_series import \
-     DatasetSeries
+    DatasetSeries
 from yt.funcs import \
-     is_root
+    is_root, mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, ProcessorPool, Communicator
-from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
+    ParallelAnalysisInterface, ProcessorPool
+from yt.analysis_modules.halo_finding.halo_objects import \
+    RockstarHaloList
 from yt.utilities.exceptions import YTRockstarMultiMassNotSupported
 
 from . import rockstar_interface
 
 import socket
 import time
-import threading
-import signal
 import os
-from os import environ
-from os import mkdir
+import numpy as np
 from os import path
 
 class InlineRunner(ParallelAnalysisInterface):
@@ -202,7 +200,7 @@
             mylog.info("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
-        if ytcfg.getboolean("yt", "inline") == True:
+        if ytcfg.getboolean("yt", "inline") is True:
             self.runner = InlineRunner()
         else:
             self.runner = StandardRunner(num_readers, num_writers)
@@ -247,8 +245,6 @@
 
         dd = tds.all_data()
         # Get DM particle mass.
-        all_fields = set(tds.derived_field_list + tds.field_list)
-        has_particle_type = ("particle_type" in all_fields)
 
         particle_mass = self.particle_mass
         if particle_mass is None:

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
 from __future__ import print_function
-import setuptools
-import os, sys, os.path
-
 import os.path
+import sys
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_finding/tests/test_rockstar.py
--- a/yt/analysis_modules/halo_finding/tests/test_rockstar.py
+++ b/yt/analysis_modules/halo_finding/tests/test_rockstar.py
@@ -2,7 +2,6 @@
 import sys
 
 from yt.convenience import load
-from yt.testing import *
 from yt.utilities.answer_testing.framework import \
     FieldValuesTest, \
     requires_sim

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math, time
+import math
 
 from yt.funcs import mylog
 from yt.units.yt_array import \
@@ -184,7 +184,7 @@
             if log_mass_max is None:
                 self.log_mass_max = 16
         # If we're making the analytic function...
-        if self.make_analytic == True:
+        if self.make_analytic is True:
             # Try to set cosmological parameters from the simulation dataset
             if simulation_ds is not None:
                 self.omega_matter0 = self.simulation_ds.omega_matter
@@ -623,27 +623,27 @@
 many times as you want. */
 
 /* TFmdm_set_cosm() -- User passes all the cosmological parameters as
-	arguments; the routine sets up all of the scalar quantites needed 
-	computation of the fitting formula.  The input parameters are: 
-	1) omega_matter -- Density of CDM, baryons, and massive neutrinos,
-				in units of the critical density. 
-	2) omega_baryon -- Density of baryons, in units of critical. 
-	3) omega_hdm    -- Density of massive neutrinos, in units of critical 
-	4) degen_hdm    -- (Int) Number of degenerate massive neutrino species 
-	5) omega_lambda -- Cosmological constant 
-	6) hubble       -- Hubble constant, in units of 100 km/s/Mpc 
-	7) redshift     -- The redshift at which to evaluate */
+   arguments; the routine sets up all of the scalar quantities needed 
+   for computation of the fitting formula.  The input parameters are: 
+   1) omega_matter -- Density of CDM, baryons, and massive neutrinos,
+                      in units of the critical density. 
+   2) omega_baryon -- Density of baryons, in units of critical. 
+   3) omega_hdm    -- Density of massive neutrinos, in units of critical 
+   4) degen_hdm    -- (Int) Number of degenerate massive neutrino species 
+   5) omega_lambda -- Cosmological constant 
+   6) hubble       -- Hubble constant, in units of 100 km/s/Mpc 
+   7) redshift     -- The redshift at which to evaluate */
 
 /* TFmdm_onek_mpc() -- User passes a single wavenumber, in units of Mpc^-1.
-	Routine returns the transfer function from the Eisenstein & Hu
-	fitting formula, based on the cosmology currently held in the 
-	internal variables.  The routine returns T_cb (the CDM+Baryon
-	density-weighted transfer function), although T_cbn (the CDM+
-	Baryon+Neutrino density-weighted transfer function) is stored
-	in the global variable tf_cbnu. */
+   Routine returns the transfer function from the Eisenstein & Hu
+   fitting formula, based on the cosmology currently held in the 
+   internal variables.  The routine returns T_cb (the CDM+Baryon
+   density-weighted transfer function), although T_cbn (the CDM+
+   Baryon+Neutrino density-weighted transfer function) is stored
+   in the global variable tf_cbnu. */
 
 /* We also supply TFmdm_onek_hmpc(), which is identical to the previous
-	routine, but takes the wavenumber in units of h Mpc^-1. */
+   routine, but takes the wavenumber in units of h Mpc^-1. */
 
 /* We hold the internal scalar quantities in global variables, so that
 the user may access them in an external program, via "extern" declarations. */
@@ -667,7 +667,7 @@
         sets many global variables for use in TFmdm_onek_mpc() */
     """
     def __init__(self, omega_matter, omega_baryon, omega_hdm,
-	    degen_hdm, omega_lambda, hubble, redshift):
+                 degen_hdm, omega_lambda, hubble, redshift):
         self.qwarn = 0;
         self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K
     

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/halo_mass_function/setup.py
--- a/yt/analysis_modules/halo_mass_function/setup.py
+++ b/yt/analysis_modules/halo_mass_function/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -19,7 +19,7 @@
 
 from yt.fields.derived_field import \
     ValidateSpatial
-from yt.funcs import mylog
+from yt.funcs import mylog, iterable
 from yt.extern.six import string_types
 
 from .clump_info_items import \

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/level_sets/clump_tools.py
--- a/yt/analysis_modules/level_sets/clump_tools.py
+++ b/yt/analysis_modules/level_sets/clump_tools.py
@@ -28,9 +28,9 @@
     counter += 1
     list.append(clump)
     clump.level = level
-    if clump.children != None:
+    if clump.children is not None:
         for child in clump.children:
-            x = recursive_all_clumps(child,list,level+1,clump.number)
+            recursive_all_clumps(child,list,level+1,clump.number)
     return list
 
 def return_all_clumps(clump):
@@ -64,8 +64,6 @@
     Recursive. Prints the level and the number of cores to the screen."""
 
     global counter
-    if dbg > 0:
-        print(tabs(level), "l =",level, "n_core",counter)
 
     if ((clump.children is None) or (len(clump.children) == 0)):
         counter += 1

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.operator_registry import \
     OperatorRegistry

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -13,11 +13,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from itertools import chain
 import numpy as np
 
-from yt.funcs import *
-import yt.utilities.data_point_utilities as data_point_utilities
+from collections import defaultdict
+
+from yt.funcs import mylog, get_pbar
 from yt.utilities.lib.ContourFinding import \
     ContourTree, TileContourTree, link_node_contours, \
     update_joins
@@ -32,7 +32,6 @@
     contours = {}
     node_ids = []
     DLE = data_source.ds.domain_left_edge
-    selector = getattr(data_source, "base_object", data_source).selector
     masks = dict((g.id, m) for g, m in data_source.blocks)
     for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
         node.node_ind = len(node_ids)

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/level_sets/setup.py
--- a/yt/analysis_modules/level_sets/setup.py
+++ b/yt/analysis_modules/level_sets/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/list_modules.py
--- a/yt/analysis_modules/list_modules.py
+++ b/yt/analysis_modules/list_modules.py
@@ -33,7 +33,7 @@
     def __getattr__(self, attr):
         try:
             name = "yt.analysis_modules.%s.api" % (attr)
-            nm = __import__(name, level=-1)
+            __import__(name, level=-1)
             setattr(self, attr, sys.modules[name])
         except ImportError:
             raise AttributeError(attr)

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -16,7 +16,7 @@
 from yt.utilities.lib.CICDeposit import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
-from yt.funcs import *
+from yt.funcs import mylog, get_pbar
 from yt.units.yt_array import array_like_field
 from yt.config import ytcfg
 from collections import OrderedDict

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/particle_trajectories/setup.py
--- a/yt/analysis_modules/particle_trajectories/setup.py
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -1,8 +1,5 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
+
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -24,7 +24,8 @@
 
 from yt.extern.six import string_types
 import numpy as np
-from yt.funcs import *
+from yt.funcs import mylog, get_pbar
+from yt.units.yt_array import YTArray
 from yt.utilities.physical_constants import mp
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_objects

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -26,14 +26,15 @@
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
 import numpy as np
-from yt.funcs import *
+from yt.funcs import \
+    mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
 from yt.utilities.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only, get_mpi_type, \
-     parallel_capable
+    communication_system, parallel_root_only, get_mpi_type, \
+    parallel_capable
 from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
 import h5py
 from yt.utilities.on_demand_imports import _astropy

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/photon_simulator/setup.py
--- a/yt/analysis_modules/photon_simulator/setup.py
+++ b/yt/analysis_modules/photon_simulator/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -11,11 +11,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import numpy as np
 import os
-from yt.funcs import *
-from yt.units.yt_array import YTQuantity
-import h5py
+
+from yt.funcs import mylog
+from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.on_demand_imports import _astropy, _scipy
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/photon_simulator/tests/test_cluster.py
--- a/yt/analysis_modules/photon_simulator/tests/test_cluster.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_cluster.py
@@ -10,11 +10,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, TableAbsorbModel, \
+    ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.analysis_modules.photon_simulator.api import *
+from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
-     GenericArrayTest, data_dir_load
+    GenericArrayTest, data_dir_load
 import numpy as np
 
 def setup():

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/ppv_cube/setup.py
--- a/yt/analysis_modules/ppv_cube/setup.py
+++ b/yt/analysis_modules/ppv_cube/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -15,7 +15,7 @@
 import yt.units as u
 from yt.utilities.physical_constants import kboltz, mh, clight
 import numpy as np
-from yt.testing import *
+from yt.testing import assert_allclose_units
 
 def setup():
     """Test specific setup."""

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
--- a/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
+++ b/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py
@@ -11,13 +11,13 @@
 #-----------------------------------------------------------------------------
 
 import yt
-from yt.testing import *
+from yt.testing import assert_allclose
 from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
 from yt.utilities.answer_testing.framework import \
     AnswerTestingTest, \
     requires_ds
-from yt.config import ytcfg
 import tempfile
+import numpy as np
 import os
 import shutil
 

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
-import sys
+
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules', parent_package, top_path)

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/spectral_integrator/setup.py
--- a/yt/analysis_modules/spectral_integrator/setup.py
+++ b/yt/analysis_modules/spectral_integrator/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/star_analysis/setup.py
--- a/yt/analysis_modules/star_analysis/setup.py
+++ b/yt/analysis_modules/star_analysis/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -18,7 +18,6 @@
 import numpy as np
 import h5py
 import math
-import itertools
 
 from yt.config import ytcfg
 from yt.extern.six.moves import zip as izip

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -19,14 +19,17 @@
 except ImportError: 
     pass
 
+import os
 import time
 import numpy as np
-from yt.funcs import *
+
 import yt.utilities.lib.api as amr_utils
+
+from yt import add_field
+from yt.funcs import get_pbar, mylog
 from yt.utilities.physical_constants import \
     kpc_per_cm, \
     sec_per_year
-from yt.mods import *
 
 def export_to_sunrise(ds, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
@@ -492,7 +495,7 @@
     idxst = dd["particle_type"] == star_type
 
     #make sure we select more than a single particle
-    assert na.sum(idxst)>0
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
@@ -543,7 +546,7 @@
     
     #make sure we have nonzero particle number
     assert pd_table.data.shape[0]>0
-    return pd_table,na.sum(idx)
+    return pd_table,np.sum(idx)
 
 
 def add_fields():
@@ -559,12 +562,8 @@
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
         star_mass_ejection_fraction = data.ds.get_parameter("StarMassEjectionFraction",float)
-        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-        dtForSFR = star_maker_minimum_dynamical_time / data.ds["years"]
         xv1 = ((data.ds["InitialTime"] - data["creation_time"])
                 / data["dynamical_time"])
-        xv2 = ((data.ds["InitialTime"] + dtForSFR - data["creation_time"])
-                / data["dynamical_time"])
         denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1)))
         minitial = data["ParticleMassMsun"] / denom
         return minitial

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/sunyaev_zeldovich/setup.py
--- a/yt/analysis_modules/sunyaev_zeldovich/setup.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -11,12 +11,12 @@
 #-----------------------------------------------------------------------------
 
 from yt.frontends.stream.api import load_uniform_grid
-from yt.funcs import get_pbar, mylog
+from yt.funcs import get_pbar
 from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-     mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
-from yt.testing import *
+    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
-     GenericArrayTest, data_dir_load, GenericImageTest
+    GenericArrayTest, data_dir_load, GenericImageTest
 try:
     from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
 except ImportError:

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/two_point_functions/setup.py
--- a/yt/analysis_modules/two_point_functions/setup.py
+++ b/yt/analysis_modules/two_point_functions/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -14,17 +14,19 @@
 #-----------------------------------------------------------------------------
 
 import h5py
-from yt.mods import *
-#from yt.utilities.math_utils import *
-from yt.utilities.performance_counters import yt_counters, time_function
+import numpy as np
+
+from yt.funcs import mylog
+from yt.utilities.performance_counters import yt_counters
 from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only
 
 try:
-    from yt.utilities.kdtree.api import *
+    from yt.utilities.kdtree.api import \
+        fKD, free_tree, create_tree
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, sys, itertools, inspect, types, time
+import math, inspect, time
 from collections import defaultdict
 
 sep = 12
@@ -117,7 +119,7 @@
         self.index = ds.index
         self.center = (ds.domain_right_edge + ds.domain_left_edge)/2.0
         # Figure out the range of ruler lengths.
-        if length_range == None:
+        if length_range is None:
             length_range = [math.sqrt(3) * self.ds.index.get_smallest_dx(),
                 self.min_edge/2.]
         else:
@@ -679,7 +681,7 @@
         """
         for fset in self._fsets:
             # Only operate on correlation functions.
-            if fset.corr_norm == None: continue
+            if fset.corr_norm is None: continue
             fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__)
             line = "# length".ljust(sep)
             line += "\\xi".ljust(sep)

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -321,7 +321,7 @@
         _units_initialized = False
         with self.data_source._field_parameter_state(self.field_parameters):
             for chunk in parallel_objects(self.data_source.chunks(
-                                          [], "io", local_only = True)): 
+                                          [], "io", local_only = True)):
                 mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)",
                             chunk.ires.size, get_memory_usage()/1024.)
                 if _units_initialized is False:
@@ -682,11 +682,13 @@
     def RightEdge(self):
         return self.right_edge
 
-    def deposit(self, positions, fields = None, method = None):
+    def deposit(self, positions, fields = None, method = None,
+                kernel_name = 'cubic'):
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        # We allocate number of zones, not number of octs
+        op = cls(self.ActiveDimensions.prod(), kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
@@ -910,22 +912,22 @@
         return ls
 
     def _minimal_box(self, dds):
-        LL = self.left_edge - self.ds.domain_left_edge
+        LL = self.left_edge.d - self.ds.domain_left_edge.d
         # Nudge in case we're on the edge
-        LL += LL.uq * np.finfo(np.float64).eps
-        LS = self.right_edge - self.ds.domain_left_edge
-        LS += LS.uq * np.finfo(np.float64).eps
+        LL += np.finfo(np.float64).eps
+        LS = self.right_edge.d - self.ds.domain_left_edge.d
+        LS += np.finfo(np.float64).eps
         cell_start = LL / dds  # This is the cell we're inside
         cell_end = LS / dds
         if self.level == 0:
             start_index = np.array(np.floor(cell_start), dtype="int64")
             end_index = np.array(np.ceil(cell_end), dtype="int64")
-            dims = np.rint((self.ActiveDimensions * self.dds) / dds).astype("int64")
+            dims = np.rint((self.ActiveDimensions * self.dds.d) / dds).astype("int64")
         else:
             # Give us one buffer
-            start_index = np.rint(cell_start.d).astype('int64') - 1
+            start_index = np.rint(cell_start).astype('int64') - 1
             # How many root cells do we occupy?
-            end_index = np.rint(cell_end.d).astype('int64')
+            end_index = np.rint(cell_end).astype('int64')
             dims = end_index - start_index + 1
         return start_index, end_index.astype("int64"), dims.astype("int32")
 
@@ -1211,9 +1213,9 @@
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> for i, r in enumerate(rhos):
         ...     surf = ds.surface(sp,'density',r)
-        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
-        ...                      color_field='temperature', dist_fac = distf, 
-        ...                      plot_index = i, color_field_max = ma, 
+        ...     surf.export_obj("my_galaxy", transparency=trans[i],
+        ...                      color_field='temperature', dist_fac = distf,
+        ...                      plot_index = i, color_field_max = ma,
         ...                      color_field_min = mi)
 
         >>> sp = ds.sphere("max", (10, "kpc"))
@@ -1787,5 +1789,3 @@
         else:
             mylog.error("Problem uploading.")
         return upload_id
-
-
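
All of the deposit() methods touched in this merge resolve their operation
class by name via getattr(particle_deposit, "deposit_%s" % method) and now
pass kernel_name through to it. A pure-Python sketch of that lookup pattern
(deposit_count below is a stand-in, not the real Cython class):

    class deposit_count(object):
        # stand-in for a Cython ParticleDepositOperation subclass
        def __init__(self, nvals, kernel_name='cubic'):
            self.nvals = nvals
            self.kernel_name = kernel_name

    def lookup_deposit_class(namespace, method):
        # mirrors the getattr(...) lookup plus the
        # YTParticleDepositionNotImplemented check in the real code
        cls = namespace.get("deposit_%s" % method, None)
        if cls is None:
            raise NotImplementedError("deposit_%s" % method)
        return cls

    op = lookup_deposit_class(globals(), "count")(512, kernel_name='cubic')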

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -34,10 +34,13 @@
 derived_quantity_registry = {}
 
 def get_position_fields(field, data):
+    axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]]
     if field[0] in data.ds.particle_types:
-        position_fields = [(field[0], 'particle_position_%s' % d) for d in 'xyz']
+        position_fields = [(field[0], 'particle_position_%s' % d)
+                           for d in axis_names]
     else:
-        position_fields = ['x', 'y', 'z']
+        position_fields = axis_names
+
     return position_fields
 
 class RegisteredDerivedQuantity(type):
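
For reference, the revised helper also respects non-Cartesian axis orderings
via ds.coordinates.axis_name. A self-contained toy (Stub mimics only the
attributes the helper touches):

    class Stub(object): pass
    data = Stub(); data.ds = Stub(); data.ds.coordinates = Stub()
    data.ds.particle_types = ("io",)
    data.ds.coordinates.axis_name = {0: 'x', 1: 'y', 2: 'z'}

    def get_position_fields(field, data):
        axis_names = [data.ds.coordinates.axis_name[num]
                      for num in [0, 1, 2]]
        if field[0] in data.ds.particle_types:
            return [(field[0], 'particle_position_%s' % d)
                    for d in axis_names]
        return axis_names

    print(get_position_fields(("io", "particle_mass"), data))
    # [('io', 'particle_position_x'), ('io', 'particle_position_y'),
    #  ('io', 'particle_position_z')]
    print(get_position_fields(("gas", "density"), data))  # ['x', 'y', 'z']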

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -77,13 +77,12 @@
         if self.start_index is not None:
             return self.start_index
         if self.Parent is None:
-            left = self.LeftEdge - self.ds.domain_left_edge
-            start_index = left / self.dds
-            return np.rint(start_index).astype('int64').ravel().view(np.ndarray)
+            left = self.LeftEdge.d - self.ds.domain_left_edge.d
+            start_index = left / self.dds.d
+            return np.rint(start_index).astype('int64').ravel()
 
-        pdx = self.Parent.dds.ndarray_view()
-        di = np.rint( (self.LeftEdge.ndarray_view() -
-                       self.Parent.LeftEdge.ndarray_view()) / pdx)
+        pdx = self.Parent.dds.d
+        di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
         start_index = self.Parent.get_global_startindex() + di
         self.start_index = (start_index * self.ds.refine_by).astype('int64').ravel()
         return self.start_index
@@ -251,7 +250,7 @@
         field_parameters.update(self.field_parameters)
         if smoothed:
             cube = self.ds.smoothed_covering_grid(
-                level, new_left_edge, 
+                level, new_left_edge,
                 field_parameters = field_parameters,
                 **kwargs)
         else:
@@ -330,12 +329,14 @@
     def particle_operation(self, *args, **kwargs):
         raise NotImplementedError
 
-    def deposit(self, positions, fields = None, method = None):
+    def deposit(self, positions, fields = None, method = None,
+                kernel_name = 'cubic'):
         # Here we perform our particle deposition.
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        # We allocate number of zones, not number of octs
+        op = cls(self.ActiveDimensions.prod(), kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
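
In the start-index hunk above, the .d accessor (shorthand for
.ndarray_view()) strips units so the arithmetic runs on bare ndarrays. A
standalone sketch with made-up values:

    import numpy as np

    left_edge   = np.array([0.5, 0.5, 0.5])        # grid LeftEdge
    domain_left = np.array([0.0, 0.0, 0.0])        # domain_left_edge
    dds         = np.array([0.125, 0.125, 0.125])  # cell width
    start_index = np.rint((left_edge - domain_left) / dds).astype('int64')
    print(start_index)   # [4 4 4]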

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -141,7 +141,8 @@
             self._domain_ind = di
         return self._domain_ind
 
-    def deposit(self, positions, fields = None, method = None):
+    def deposit(self, positions, fields = None, method = None,
+                kernel_name='cubic'):
         r"""Operate on the mesh, in a particle-against-mesh fashion, with
         exclusively local input.
 
@@ -176,7 +177,8 @@
             raise YTParticleDepositionNotImplemented(method)
         nz = self.nz
         nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
-        op = cls(nvals) # We allocate number of zones, not number of octs
+        # We allocate number of zones, not number of octs
+        op = cls(nvals, kernel_name)
         op.initialize()
         mylog.debug("Depositing %s (%s^3) particles into %s Octs",
             positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
@@ -192,7 +194,8 @@
         return np.asfortranarray(vals)
 
     def smooth(self, positions, fields = None, index_fields = None,
-               method = None, create_octree = False, nneighbors = 64):
+               method = None, create_octree = False, nneighbors = 64,
+               kernel_name = 'cubic'):
         r"""Operate on the mesh, in a particle-against-mesh fashion, with
         non-local input.
 
@@ -258,7 +261,7 @@
         nz = self.nz
         mdom_ind = self.domain_ind
         nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
-        op = cls(nvals, len(fields), nneighbors)
+        op = cls(nvals, len(fields), nneighbors, kernel_name)
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
@@ -280,7 +283,7 @@
         return vals
 
     def particle_operation(self, positions, fields = None,
-            method = None, nneighbors = 64):
+            method = None, nneighbors = 64, kernel_name = 'cubic'):
         r"""Operate on particles, in a particle-against-particle fashion.
 
         This uses the octree indexing system to call a "smoothing" operation
@@ -335,7 +338,7 @@
         nz = self.nz
         mdom_ind = self.domain_ind
         nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
-        op = cls(nvals, len(fields), nneighbors)
+        op = cls(nvals, len(fields), nneighbors, kernel_name)
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
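
With kernel_name threaded through deposit(), smooth(), and
particle_operation(), callers can select a kernel per call. A hedged usage
sketch, assuming ds is an already-loaded grid dataset carrying an "io"
particle type ("count" resolves to the existing deposit_count operation,
which accepts but does not use the kernel):

    grid = ds.index.grids[0]
    pos = grid["io", "particle_position"]
    counts = grid.deposit(pos, method="count", kernel_name="cubic")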

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -713,6 +713,7 @@
                               unit_registry=self.unit_registry))
             setattr(self, "critical_density",
                     self.cosmology.critical_density(self.current_redshift))
+            self.scale_factor = 1.0 / (1.0 + self.current_redshift)
 
     def get_unit_from_registry(self, unit_str):
         """

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -105,13 +105,15 @@
     def select_tcoords(self, dobj):
         raise NotImplementedError
 
-    def deposit(self, positions, fields = None, method = None):
+    def deposit(self, positions, fields = None, method = None,
+                kernel_name = 'cubic'):
         raise NotImplementedError
         # Here we perform our particle deposition.
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        # We allocate number of zones, not number of octs
+        op = cls(self.ActiveDimensions.prod(), kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
@@ -200,4 +202,3 @@
             else:
                 self._last_count = mask.sum()
         return mask
-

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -215,7 +215,11 @@
             return new_field
         return func
 
-    grad_units = "(%s) / cm" % field_units
+    if field_units != "":
+        grad_units = "(%s) / cm" % field_units
+    else:
+        grad_units = "1 / cm"
+
     for axi, ax in enumerate('xyz'):
         f = grad_func(axi, ax)
         registry.add_field((ftype, "%s_gradient_%s" % (fname, ax)),
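
The new branch keeps the gradient unit string well formed when the base
field is dimensionless; previously it would have produced "() / cm", which
is not a valid unit expression. Both cases, spelled out:

    for field_units in ("g/cm**3", ""):
        if field_units != "":
            grad_units = "(%s) / cm" % field_units
        else:
            grad_units = "1 / cm"
        print(grad_units)   # "(g/cm**3) / cm", then "1 / cm"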

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -490,8 +490,8 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
+        theta = get_sph_theta(pos, normal)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -533,8 +533,8 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
+        theta = get_sph_theta(pos, normal)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         spht = get_sph_theta_component(vel, theta, phi, normal)
@@ -568,7 +568,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        phi = get_sph_phi(pos, center)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         sphp = get_sph_phi_component(vel, phi, normal)
@@ -661,7 +661,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, center)
+        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylr = get_cyl_r_component(vel, theta, normal)
@@ -685,7 +685,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, center)
+        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylt = get_cyl_theta_component(vel, theta, normal)
@@ -766,8 +766,12 @@
 
 def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
         smoothing_length_name, density_name, smoothed_field, registry,
-        nneighbors = None):
-    field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
+        nneighbors = None, kernel_name = 'cubic'):
+    if kernel_name == 'cubic':
+        field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
+    else:
+        field_name = ("deposit", "%s_%s_smoothed_%s" % (ptype, kernel_name,
+                      smoothed_field))
     field_units = registry[ptype, smoothed_field].units
     def _vol_weight(field, data):
         pos = data[ptype, coord_name]
@@ -789,7 +793,8 @@
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
                          method="volume_weighted",
-                         create_octree=True)[0]
+                         create_octree=True,
+                         kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
         rv = data.apply_units(rv, field_units)
@@ -816,8 +821,12 @@
                        units = "code_length")
     return [field_name]
 
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64):
-    field_name = (ptype, "smoothed_density")
+def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
+                       kernel_name = 'cubic'):
+    if kernel_name == 'cubic':
+        field_name = (ptype, "smoothed_density")
+    else:
+        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
     field_units = registry[ptype, mass_name].units
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name]
@@ -827,7 +836,8 @@
         densities = mass * 0.0
         data.particle_operation(pos, [mass, densities],
                          method="density",
-                         nneighbors = nneighbors)
+                         nneighbors = nneighbors,
+                         kernel_name = kernel_name)
         ones = pos.prod(axis=1) # Get us in code_length**3
         ones[:] = 1.0
         densities /= ones
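
Note the naming scheme: the default cubic kernel keeps the old field names
for backwards compatibility, while other kernels are namespaced into the
field name. With a hypothetical ("PartType0", "Temperature") field:

    ptype, smoothed_field = "PartType0", "Temperature"
    for kernel_name in ("cubic", "quintic"):
        if kernel_name == 'cubic':
            field_name = ("deposit",
                          "%s_smoothed_%s" % (ptype, smoothed_field))
        else:
            field_name = ("deposit", "%s_%s_smoothed_%s"
                          % (ptype, kernel_name, smoothed_field))
        print(field_name)
    # ('deposit', 'PartType0_smoothed_Temperature')
    # ('deposit', 'PartType0_quintic_smoothed_Temperature')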

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -196,15 +196,23 @@
 
 def test_add_gradient_fields():
     gfields = base_ds.add_gradient_fields(("gas","density"))
+    gfields += base_ds.add_gradient_fields(("index", "ones"))
     field_list = [('gas', 'density_gradient_x'),
                   ('gas', 'density_gradient_y'),
                   ('gas', 'density_gradient_z'),
-                  ('gas', 'density_gradient_magnitude')]
+                  ('gas', 'density_gradient_magnitude'),
+                  ('index', 'ones_gradient_x'),
+                  ('index', 'ones_gradient_y'),
+                  ('index', 'ones_gradient_z'),
+                  ('index', 'ones_gradient_magnitude')]
     assert_equal(gfields, field_list)
     ad = base_ds.all_data()
     for field in field_list:
         ret = ad[field]
-        assert str(ret.units) == "g/cm**4"
+        if field[0] == 'gas':
+            assert str(ret.units) == "g/cm**4"
+        else:
+            assert str(ret.units) == "1/cm"
 
 def get_data(ds, field_name):
     # Need to create a new data object otherwise the errors we are

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -133,7 +133,8 @@
         tr = dict((field, v) for field, v in zip(fields, tr))
         return tr
 
-    def deposit(self, positions, fields = None, method = None):
+    def deposit(self, positions, fields = None, method = None,
+                kernel_name = 'cubic'):
         # Here we perform our particle deposition.
         if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
@@ -141,7 +142,8 @@
             raise YTParticleDepositionNotImplemented(method)
         nz = self.nz
         nvals = (nz, nz, nz, self.ires.size)
-        op = cls(nvals) # We allocate number of zones, not number of octs
+        # We allocate number of zones, not number of octs
+        op = cls(nvals, kernel_name)
         op.initialize()
         mylog.debug("Depositing %s (%s^3) particles into %s Root Mesh",
             positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/frontends/artio/setup.py
--- a/yt/frontends/artio/setup.py
+++ b/yt/frontends/artio/setup.py
@@ -19,7 +19,8 @@
                          depends=artio_sources + 
                                  ["yt/utilities/lib/fp_utils.pxd",
                                   "yt/geometry/oct_container.pxd",
-                                  "yt/geometry/selection_routines.pxd"])
+                                  "yt/geometry/selection_routines.pxd",
+                                  "yt/geometry/particle_deposit.pxd"])
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,26 +491,18 @@
         """
         Generates the conversion to various physical units based on the parameter file
         """
-        #Please note that for all units given in the info file, the boxlen
-        #still needs to be folded in, as shown below!
+        # loading the units from the info file
+        boxlen=self.parameters['boxlen']
+        length_unit = self.parameters['unit_l']
+        density_unit = self.parameters['unit_d']
+        time_unit = self.parameters['unit_t']
 
-        boxlen=self.parameters['boxlen']
-        length_unit = self.parameters['unit_l'] * boxlen
-        density_unit = self.parameters['unit_d']/ boxlen**3
-
-        # In the mass unit, the factors of boxlen cancel back out, so this 
-        #is equivalent to unit_d*unit_l**3
-
-        mass_unit = density_unit * length_unit**3
-
-        # Cosmological runs are done in lookback conformal time. 
-        # To convert to proper time, the time unit is calculated from 
-        # the expansion factor. This is not yet  done here!
-
-        time_unit = self.parameters['unit_t']
+        # calculating derived units (except velocity and temperature, done below)
+        mass_unit = density_unit * length_unit**3
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
+
         # TODO:
         # Generalize the temperature field to account for ionization
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
@@ -518,13 +510,15 @@
 
         self.density_unit = self.quan(density_unit, 'g/cm**3')
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+        self.time_unit = self.quan(time_unit, "s")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+        self.temperature_unit = (self.velocity_unit**2 * mp *
+                                 mean_molecular_weight_factor / kb).in_units('K')
+
+        # Only the length unit gets scaled by a factor of boxlen
         self.length_unit = self.quan(length_unit * boxlen, "cm")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.time_unit = self.quan(time_unit, "s")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2 * mp *
-                                 mean_molecular_weight_factor / kb)
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
 
     def _parse_parameter_file(self):
         # hardcoded for now
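
The reordered unit setup reads the raw CGS scales (unit_l, unit_d, unit_t)
straight from the info file, derives the rest, and folds boxlen into the
length unit only. A plain-number sketch with illustrative values, not taken
from a real info file:

    import numpy as np

    boxlen = 1.0
    length_unit = 3.09e21       # cm, roughly a kiloparsec
    density_unit = 1e-26        # g/cm**3
    time_unit = 3.15e13         # s

    mass_unit = density_unit * length_unit**3
    velocity_unit = length_unit / time_unit
    pressure_unit = density_unit * velocity_unit**2
    magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                            (time_unit**2 * length_unit))
    length_unit = length_unit * boxlen   # only length picks up boxlen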

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -38,7 +38,7 @@
 # Standard SPH kernel for use with the Grid method #
 ####################################################
 
-cdef inline np.float64_t sph_kernel(np.float64_t x) nogil:
+cdef inline np.float64_t sph_kernel_cubic(np.float64_t x) nogil:
     cdef np.float64_t kernel
     if x <= 0.5:
         kernel = 1.-6.*x*x*(1.-x)
@@ -48,8 +48,55 @@
         kernel = 0.
     return kernel
 
+########################################################
+# Alternative SPH kernels for use with the Grid method #
+########################################################
+
+# quartic spline
+cdef inline np.float64_t sph_kernel_quartic(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 5.**6/512/np.pi
+    if x < 1:
+        kernel = (1.-x)**4
+        if x < 3./5:
+            kernel -= 5*(3./5-x)**4
+            if x < 1./5:
+                kernel += 10*(1./5-x)**4
+    else:
+        kernel = 0.
+    return kernel * C
+
+# quintic spline
+cdef inline np.float64_t sph_kernel_quintic(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 3.**7/40/np.pi
+    if x < 1:
+        kernel = (1.-x)**5
+        if x < 2./3:
+            kernel -= 6*(2./3-x)**5
+            if x < 1./3:
+                kernel += 15*(1./3-x)**5
+    else:
+        kernel = 0.
+    return kernel * C
+
+# I don't know of a way to use a dict in a cdef class,
+# so in order to mimic registry functionality,
+# I manually created a function to look up the kernel functions.
+ctypedef np.float64_t (*kernel_func) (np.float64_t)
+cdef inline kernel_func get_kernel_func(str kernel_name):
+    if kernel_name == 'cubic':
+        return sph_kernel_cubic
+    elif kernel_name == 'quartic':
+        return sph_kernel_quartic
+    elif kernel_name == 'quintic':
+        return sph_kernel_quintic
+    else:
+        raise NotImplementedError
+
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
+    cdef kernel_func sph_kernel
     cdef public object nvals
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
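
For readers without the Cython context: pure-Python equivalents of two of
the spline kernels above, with a plain dict standing in for get_kernel_func
(the comment in the .pxd explains why the cdef code uses an if/elif chain
instead):

    import numpy as np

    def sph_kernel_cubic(x):
        if x <= 0.5:
            return 1. - 6.*x*x*(1. - x)
        elif x <= 1.0:
            return 2.*(1. - x)**3
        return 0.

    def sph_kernel_quartic(x):
        C = 5.**6/512/np.pi
        if x >= 1:
            return 0.
        kernel = (1. - x)**4
        if x < 3./5:
            kernel -= 5*(3./5 - x)**4
            if x < 1./5:
                kernel += 10*(1./5 - x)**4
        return kernel * C

    KERNELS = {'cubic': sph_kernel_cubic, 'quartic': sph_kernel_quartic}

    def get_kernel_func(kernel_name):
        try:
            return KERNELS[kernel_name]
        except KeyError:
            raise NotImplementedError(kernel_name)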

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -25,9 +25,10 @@
     OctreeContainer, OctInfo
 
 cdef class ParticleDepositOperation:
-    def __init__(self, nvals):
+    def __init__(self, nvals, kernel_name):
         self.nvals = nvals
         self.update_values = 0 # This is the default
+        self.sph_kernel = get_kernel_func(kernel_name)
 
     def initialize(self, *args):
         raise NotImplementedError
@@ -227,7 +228,7 @@
                     dist = idist[0] + idist[1] + idist[2]
                     # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / fields[0]
-                    self.temp[gind(i,j,k,dim) + offset] = sph_kernel(dist)
+                    self.temp[gind(i,j,k,dim) + offset] = self.sph_kernel(dist)
                     kernel_sum += self.temp[gind(i,j,k,dim) + offset]
         # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
@@ -493,4 +494,3 @@
         return self.onnfield
 
 deposit_nearest = NNParticleField
-

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -22,7 +22,7 @@
 
 from fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
-from .particle_deposit cimport sph_kernel, gind
+from .particle_deposit cimport kernel_func, get_kernel_func, gind
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
@@ -34,6 +34,7 @@
 
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
+    cdef kernel_func sph_kernel
     cdef public object nvals
     cdef np.float64_t DW[3]
     cdef int nfields

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -74,7 +74,7 @@
     opos[2] = ipos[2]
 
 cdef class ParticleSmoothOperation:
-    def __init__(self, nvals, nfields, max_neighbors):
+    def __init__(self, nvals, nfields, max_neighbors, kernel_name):
         # This is the set of cells, in grids, blocks or octs, we are handling.
         cdef int i
         self.nvals = nvals
@@ -83,6 +83,7 @@
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()
+        self.sph_kernel = get_kernel_func(kernel_name)
 
     def initialize(self, *args):
         raise NotImplementedError
@@ -630,7 +631,7 @@
             # Usually this density has been computed
             dens = fields[2][pn]
             if dens == 0.0: continue
-            weight = mass * sph_kernel(sqrt(r2) / hsml) / dens
+            weight = mass * self.sph_kernel(sqrt(r2) / hsml) / dens
             # Mass of the particle times the value
             for fi in range(self.nfields - 3):
                 val = fields[fi + 3][pn]
@@ -756,7 +757,7 @@
         for pn in range(self.curn):
             mass = fields[0][self.neighbors[pn].pn]
             r2 = self.neighbors[pn].r2
-            lw = sph_kernel(sqrt(r2) / hsml)
+            lw = self.sph_kernel(sqrt(r2) / hsml)
             dens += mass * lw
         weight = (4.0/3.0) * 3.1415926 * hsml**3
         fields[1][offset] = dens/weight

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -109,6 +109,8 @@
 cdef class SelectorObject:
 
     def __cinit__(self, dobj, *args):
+        cdef np.float64_t [:] DLE
+        cdef np.float64_t [:] DRE
         self.min_level = getattr(dobj, "min_level", 0)
         self.max_level = getattr(dobj, "max_level", 99)
         self.overlap_cells = 0

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -424,7 +424,8 @@
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
-        self *= conversion_factor
+        values = self.d
+        values *= conversion_factor
 
         if offset:
             np.subtract(self, offset*self.uq, self)
@@ -1309,7 +1310,7 @@
     Parameters
     ----------
     fname : str
-        Filename to read. 
+        Filename to read.
     dtype : data-type, optional
         Data-type of the resulting array; default: float.
     delimiter : str, optional
@@ -1367,7 +1368,7 @@
             footer='', comments='#'):
     r"""
     Write YTArrays with unit information to a text file.
-    
+
     Parameters
     ----------
     fname : str
@@ -1375,7 +1376,7 @@
     arrays : list of YTArrays or single YTArray
         The array(s) to write to the file.
     fmt : str or sequence of strs, optional
-        A single format (%10.5f), or a sequence of formats. 
+        A single format (%10.5f), or a sequence of formats.
     delimiter : str, optional
         String or character separating columns.
     header : str, optional
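
The convert_to_units change above scales the bare ndarray view (self.d)
rather than the YTArray itself, presumably to keep the multiplication from
going back through unit-aware arithmetic after self.units has already been
reassigned. The key property, sketched:

    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.array([1.0, 2.0, 3.0]), "km")
    values = a.d     # a plain ndarray *view* onto the same buffer
    values *= 1e5    # in-place scaling mutates a's data, no unit logic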

diff -r f0f1ca8d9f176fdfe53dc701d838d5d958675730 -r 5215eda8d551e97816f17bca424c4c3c6c05df81 yt/utilities/lib/api.py
--- a/yt/utilities/lib/api.py
+++ b/yt/utilities/lib/api.py
@@ -29,3 +29,4 @@
 from .write_array import *
 from .mesh_utilities import *
 from .ContourFinding import *
+from .line_integral_convolution import *

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5e5edba3739c/
Changeset:   5e5edba3739c
Branch:      yt
User:        xarthisius
Date:        2015-08-27 16:27:38+00:00
Summary:     Merged in ngoldbaum/yt (pull request #1700)

Add support for particle fields to the [Min,Max]Location derived quantities
Affected #:  1 file

diff -r 5538c6857f6df478eaeb7074d2896b18f9c550d4 -r 5e5edba3739c1b8c9f8ee2d32e87304bc64557bf yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -33,6 +33,16 @@
 
 derived_quantity_registry = {}
 
+def get_position_fields(field, data):
+    axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]]
+    if field[0] in data.ds.particle_types:
+        position_fields = [(field[0], 'particle_position_%s' % d)
+                           for d in axis_names]
+    else:
+        position_fields = axis_names
+
+    return position_fields
+
 class RegisteredDerivedQuantity(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -542,19 +552,17 @@
         return rv
 
     def process_chunk(self, data, field):
-        axis_names = data.ds.coordinates.axis_name
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, -HUGE, field)
-        mx = array_like_field(data, -1, axis_names[0])
-        my = array_like_field(data, -1, axis_names[1])
-        mz = array_like_field(data, -1, axis_names[2])
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         maxi = -1
         if data[field].size > 0:
             maxi = np.argmax(data[field])
             ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in (axis_names[0],
-                                                    axis_names[1],
-                                                    axis_names[2])]
+            mx, my, mz = [data[ax][maxi] for ax in position_fields]
         return (ma, maxi, mx, my, mz)
 
     def reduce_intermediate(self, values):
@@ -590,14 +598,15 @@
     def process_chunk(self, data, field):
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         mini = -1
         if data[field].size > 0:
             mini = np.argmin(data[field])
             ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in 'xyz']
+            mx, my, mz = [data[ax][mini] for ax in position_fields]
         return (ma, mini, mx, my, mz)
 
     def reduce_intermediate(self, values):
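
With this merged, the location quantities accept particle fields directly.
A hedged usage sketch (the filename and the "io" particle type are
hypothetical; per the code above, the quantity returns the extreme value,
its index within the winning chunk, and the position):

    import yt

    ds = yt.load("snapshot_000.hdf5")   # hypothetical SPH dataset
    ad = ds.all_data()
    val, idx, x, y, z = ad.quantities.max_location(("io", "particle_mass"))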

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the commit notification service enabled for this
repository.



More information about the yt-svn mailing list