[yt-svn] commit/yt: 20 new changesets

commits-noreply at bitbucket.org
Tue Mar 25 10:37:58 PDT 2014


20 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d56e687090ba/
Changeset:   d56e687090ba
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 14:07:38
Summary:     Reducing imports of frontends, simplifying process.
Affected #:  2 files

diff -r 4bc951ee2db7e00d453de6a3bdf95fdd03732836 -r d56e687090ba46a409e1bba619ecab857ff84dbb yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -12,3 +12,29 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import sys, types, os, glob, cPickle, time, importlib
+
+_frontends = [
+    'art',
+    'artio',
+    'athena',
+    'boxlib',
+    #'chombo',
+    'enzo',
+    'fits',
+    'flash',
+    #'gdf',
+    'halo_catalogs',
+    'moab',
+    #'pluto',
+    'ramses',
+    'sph',
+    'stream',
+]
+
+class _frontend_container:
+    def __init__(self):
+        for frontend in _frontends:
+            _mod = "yt.frontends.%s.api" % frontend
+            setattr(self, frontend, importlib.import_module(_mod))
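
A minimal usage sketch of the container this diff introduces (EnzoDataset is taken from the pre-existing yt.frontends.enzo.api exports shown in the yt/mods.py hunk below): each entry in _frontends becomes an attribute holding that frontend's api module, so datasets are reached through one object instead of a wall of imports.

    from yt.frontends.api import _frontend_container
    frontends = _frontend_container()
    # Each listed frontend's api module hangs off the container:
    enzo_api = frontends.enzo                 # the yt.frontends.enzo.api module
    EnzoDataset = frontends.enzo.EnzoDataset  # class formerly imported in yt.mods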

diff -r 4bc951ee2db7e00d453de6a3bdf95fdd03732836 -r d56e687090ba46a409e1bba619ecab857ff84dbb yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -69,79 +69,19 @@
     ImageArray, particle_filter, create_profile, \
     Profile1D, Profile2D, Profile3D
 
-from yt.frontends.enzo.api import \
-    EnzoDataset, EnzoDatasetInMemory, \
-    EnzoSimulation, EnzoFieldInfo, add_enzo_field
-
-# Boxlib stuff
-from yt.frontends.boxlib.api import \
-    BoxlibDataset
-
-# Orion stuff
-#from yt.frontends.boxlib.api import \
-#    OrionDataset, OrionFieldInfo, add_orion_field
-
-# Maestro stuff
-#from yt.frontends.boxlib.api import \
-#    MaestroDataset
-
-# Castro stuff
-#from yt.frontends.boxlib.api import \
-#    CastroDataset
-
-from yt.frontends.flash.api import \
-    FLASHDataset, FLASHFieldInfo
-
-from yt.frontends.artio.api import \
-    ARTIODataset, ARTIOFieldInfo
-
-from yt.frontends.ramses.api import \
-    RAMSESDataset, RAMSESFieldInfo
-
-from yt.frontends.halo_catalogs.api import \
-    HaloCatalogDataset, HaloCatalogFieldInfo, \
-    RockstarDataset, RockstarFieldInfo
-
-#from yt.frontends.chombo.api import \
-#    ChomboDataset, ChomboFieldInfo, add_chombo_field
-
-#from yt.frontends.gdf.api import \
-#    GDFDataset, GDFFieldInfo, add_gdf_field
-
-from yt.frontends.moab.api import \
-    MoabHex8Dataset, MoabFieldInfo, \
-    PyneMoabHex8Dataset, PyneFieldInfo
-
-from yt.frontends.athena.api import \
-    AthenaDataset, AthenaFieldInfo
-
-from yt.frontends.art.api import \
-    ARTDataset, ARTFieldInfo
-
-#from yt.frontends.pluto.api import \
-#     PlutoDataset, PlutoFieldInfo, add_pluto_field
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
 
 from yt.frontends.stream.api import \
-    StreamDataset, \
-    StreamHandler, load_uniform_grid, load_amr_grids, \
+    load_uniform_grid, load_amr_grids, \
     load_particles, load_hexahedral_mesh, load_octree
 
-from yt.frontends.sph.api import \
-    OWLSDataset, SPHFieldInfo, \
-    GadgetDataset, GadgetHDF5Dataset, \
-    TipsyDataset
-
 # For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
 GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
 TipsyStaticOutput = deprecated_class(TipsyDataset)
 
-#from yt.analysis_modules.list_modules import \
-#    get_available_modules, amods
-#available_analysis_modules = get_available_modules()
-
-from yt.frontends.fits.api import \
-    FITSDataset, FITSFieldInfo
-
 # Import our analysis modules
 from yt.analysis_modules.halo_finding.api import \
     HaloFinder
@@ -166,9 +106,6 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
 
-for name, cls in callback_registry.items():
-    exec("%s = cls" % name)
-
 from yt.convenience import \
     load, projload, simulation
 


https://bitbucket.org/yt_analysis/yt/commits/28627812514d/
Changeset:   28627812514d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 14:15:12
Summary:     Implementing a lot more of YTEP-0019.
Affected #:  2 files

diff -r d56e687090ba46a409e1bba619ecab857ff84dbb -r 28627812514d05954581c4ca74c2a45cd109bf69 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -725,3 +725,14 @@
         return cls(*args, **kwargs)
     return _func
     
+def enable_plugins():
+    from yt.config import ytcfg
+    my_plugin_name = ytcfg.get("yt","pluginfilename")
+    # We assume that it is with respect to the $HOME/.yt directory
+    if os.path.isfile(my_plugin_name):
+        _fn = my_plugin_name
+    else:
+        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+    if os.path.isfile(_fn):
+        mylog.info("Loading plugins from %s", _fn)
+        execfile(_fn)
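
A sketch of the new entry point, which replaces the inline block removed from yt/mods.py below. The lookup order matches the code above: a plugin file in the current directory wins, otherwise the configured name is resolved under ~/.yt ("my_plugins.py" is the usual default for pluginfilename, though that default lives in the config, not this diff).

    from yt.funcs import enable_plugins
    enable_plugins()   # exec's ~/.yt/<pluginfilename> if it exists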

diff -r d56e687090ba46a409e1bba619ecab857ff84dbb -r 28627812514d05954581c4ca74c2a45cd109bf69 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -21,8 +21,6 @@
 #
 
 # First module imports
-import sys, types, os, glob, cPickle, time
-import numpy as na # For historical reasons
 import numpy as np # For modern purposes
 import numpy # In case anyone wishes to use it by name
 
@@ -35,9 +33,23 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-from yt.funcs import *
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
 from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.performance_counters import yt_counters, time_function
 from yt.config import ytcfg, ytcfg_defaults
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
@@ -64,8 +76,7 @@
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    data_object_registry, \
-    DatasetSeries, AnalysisTask, analysis_task, \
+    DatasetSeries, \
     ImageArray, particle_filter, create_profile, \
     Profile1D, Profile2D, Profile3D
 
@@ -82,32 +93,24 @@
 TipsyDataset = frontends.sph.TipsyDataset
 TipsyStaticOutput = deprecated_class(TipsyDataset)
 
-# Import our analysis modules
-from yt.analysis_modules.halo_finding.api import \
-    HaloFinder
-
-from yt.utilities.definitions import \
-    axis_names, x_dict, y_dict, inv_axis_names
-
 # Now individual component imports from the visualization API
 from yt.visualization.api import \
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    callback_registry, write_bitmap, write_image, \
+    write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, ProfilePlot, PhasePlot
 
 from yt.visualization.volume_rendering.api import \
-    ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera, off_axis_projection, MosaicFisheyeCamera
+    off_axis_projection
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
 
 from yt.convenience import \
-    load, projload, simulation
+    load, simulation
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
@@ -121,12 +124,4 @@
 # Unfortunately, for now, I think the easiest and simplest way of doing
 # this is also the most dangerous way.
 if ytcfg.getboolean("yt","loadfieldplugins"):
-    my_plugin_name = ytcfg.get("yt","pluginfilename")
-    # We assume that it is with respect to the $HOME/.yt directory
-    if os.path.isfile(my_plugin_name):
-        _fn = my_plugin_name
-    else:
-        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
-    if os.path.isfile(_fn):
-        mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+    enable_plugins()


https://bitbucket.org/yt_analysis/yt/commits/a8ba20388c0d/
Changeset:   a8ba20388c0d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 14:29:19
Summary:     Implementing more of YTEP-0019.
Affected #:  3 files

diff -r 28627812514d05954581c4ca74c2a45cd109bf69 -r a8ba20388c0df217d8b97b564fe5b7904a3065c9 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -18,8 +18,11 @@
 
 import argparse, os, sys
 
-from yt.config import ytcfg
-from yt.funcs import *
+from .config import ytcfg
+from .funcs import *
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    enable_parallelism
 
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
@@ -47,6 +50,9 @@
             if ytcfg.getboolean("yt","LogFile"):
                 ytcfg["yt","LogFile"] = "False"
                 yt.utilities.logger.disable_file_logging()
+        # Now we have to turn on the parallelism from the perspective of the
+        # parallel_analysis_interface
+        enable_parallelism()
     return parallel_capable
 
 # This fallback is for Paraview:

diff -r 28627812514d05954581c4ca74c2a45cd109bf69 -r a8ba20388c0df217d8b97b564fe5b7904a3065c9 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -47,3 +47,26 @@
     from .framework import AnswerTesting
 except ImportError:
     raise
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys, yt
+    from yt.funcs import mylog
+    orig_level = mylog.getEffectiveLevel()
+    mylog.setLevel(50)
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(yt.__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        mylog.setLevel(orig_level)
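
A usage sketch for the relocated test runner: level 50 corresponds to logging.CRITICAL, so yt's logger stays quiet for the duration of the run and is restored afterward. Answer tests and big-data answer tests remain opt-in.

    from yt.utilities.answer_testing.api import run_nose
    run_nose(verbose=True)           # plain suite, verbose nose output
    run_nose(run_answer_tests=True)  # adds --with-answer-testing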

diff -r 28627812514d05954581c4ca74c2a45cd109bf69 -r a8ba20388c0df217d8b97b564fe5b7904a3065c9 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -33,8 +33,25 @@
 
 parallel_capable = ytcfg.getboolean("yt", "__parallel")
 
+dtype_names = dict(
+        float32 = "MPI.FLOAT",
+        float64 = "MPI.DOUBLE",
+        int32   = "MPI.INT",
+        int64   = "MPI.LONG",
+        c       = "MPI.CHAR",
+)
+op_names = dict(
+        sum = "MPI.SUM",
+        min = "MPI.MIN",
+        max = "MPI.MAX"
+)
+
 # Set up translation table and import things
-if parallel_capable:
+
+def enable_parallelism():
+    global parallel_capable
+    parallel_capable = ytcfg.getboolean("yt", "__parallel")
+    if not parallel_capable: return False
     from mpi4py import MPI
     yt.utilities.logger.uncolorize_logging()
     # Even though the uncolorize function already resets the format string,
@@ -48,32 +65,18 @@
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")
-    dtype_names = dict(
+    dtype_names.update(dict(
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
             int32   = MPI.INT,
             int64   = MPI.LONG,
             c       = MPI.CHAR,
-    )
-    op_names = dict(
+    ))
+    op_names.update(dict(
         sum = MPI.SUM,
         min = MPI.MIN,
         max = MPI.MAX
-    )
-
-else:
-    dtype_names = dict(
-            float32 = "MPI.FLOAT",
-            float64 = "MPI.DOUBLE",
-            int32   = "MPI.INT",
-            int64   = "MPI.LONG",
-            c       = "MPI.CHAR",
-    )
-    op_names = dict(
-            sum = "MPI.SUM",
-            min = "MPI.MIN",
-            max = "MPI.MAX"
-    )
+    ))
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.
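
Sketching the flow this diff sets up: the string placeholders are now always defined at import time, and enable_parallelism() rebinds them in place (hence the .update() calls) once mpi4py is imported. Something like the following, run under e.g. mpirun (illustrative), exercises it:

    import yt.utilities.parallel_tools.parallel_analysis_interface as pai
    pai.enable_parallelism()   # returns False (a no-op) unless ytcfg "__parallel" is set
    if pai.parallel_capable:
        print pai.dtype_names["float64"]   # now MPI.DOUBLE, not the string "MPI.DOUBLE"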


https://bitbucket.org/yt_analysis/yt/commits/b10724f9b16a/
Changeset:   b10724f9b16a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 14:53:13
Summary:     Speed up zipping the cmaps.
Affected #:  1 file

diff -r a8ba20388c0df217d8b97b564fe5b7904a3065c9 -r b10724f9b16a0de66125ebfc058c883ce09e4f6b yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -11,6 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import numpy as np
+from itertools import izip
 
 import matplotlib
 import matplotlib.colors as cc
@@ -82,9 +83,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
-         'green':zip(_vs,_kamae_grn,_kamae_grn),
-         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':izip(_vs,_kamae_red,_kamae_red),
+         'green':izip(_vs,_kamae_grn,_kamae_grn),
+         'blue':izip(_vs,_kamae_blu,_kamae_blu)}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -147,9 +148,9 @@
 _vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': zip(_vs,v[0],v[0]),
-                  'green': zip(_vs,v[1],v[1]),
-                  'blue': zip(_vs,v[2],v[2]) }
+        cdict = { 'red': izip(_vs,v[0],v[0]),
+                  'green': izip(_vs,v[1],v[1]),
+                  'blue': izip(_vs,v[2],v[2]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):
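
For context, a small Python 2 sketch of the trade this commit makes: izip skips materializing the list of (x, y0, y1) tuples up front, which is the speedup, but the object it returns is a single-pass iterator.

    from itertools import izip
    _vs = [0.0, 0.5, 1.0]
    lazy = izip(_vs, _vs, _vs)   # nothing is built yet, unlike zip()
    print list(lazy)   # [(0.0, 0.0, 0.0), (0.5, 0.5, 0.5), (1.0, 1.0, 1.0)]
    print list(lazy)   # [] -- the iterator is already exhausted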


https://bitbucket.org/yt_analysis/yt/commits/b3febc76f9f1/
Changeset:   b3febc76f9f1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 14:53:23
Summary:     Import parallel_analysis_interface on demand.
Affected #:  1 file

diff -r b10724f9b16a0de66125ebfc058c883ce09e4f6b -r b3febc76f9f150f776c54910f8e31ded755840cd yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -18,11 +18,8 @@
 
 import argparse, os, sys
 
-from .config import ytcfg
-from .funcs import *
-
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    enable_parallelism
+from yt.config import ytcfg
+from yt.funcs import *
 
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
@@ -52,6 +49,9 @@
                 yt.utilities.logger.disable_file_logging()
         # Now we have to turn on the parallelism from the perspective of the
         # parallel_analysis_interface
+        from yt.utilities.parallel_tools.parallel_analysis_interface import \
+            enable_parallelism
+
         enable_parallelism()
     return parallel_capable
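
The pattern here, sketched in isolation: moving an import from module scope into the call site defers loading (and any import-cycle risk) until the function actually runs, so importing yt.startup_tasks alone no longer touches the parallel machinery.

    def _turn_on_parallelism():   # illustrative name for the enclosing helper
        from yt.utilities.parallel_tools.parallel_analysis_interface import \
            enable_parallelism    # resolved on first call, not at import time
        return enable_parallelism()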
 


https://bitbucket.org/yt_analysis/yt/commits/3ff631fd1ffb/
Changeset:   3ff631fd1ffb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 15:01:27
Summary:     Adding more YTEP-0019 switching.
Affected #:  2 files

diff -r b3febc76f9f150f776c54910f8e31ded755840cd -r 3ff631fd1ffb8ef3bd651b79c48a5dbed895a5b1 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -74,25 +74,85 @@
 
 __version__ = "3.0-dev"
 
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys
-    from yt.config import ytcfg
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg.set("yt","suppressStreamLogging", 'True')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
+# First module imports
+import numpy as np # For modern purposes
+import numpy # In case anyone wishes to use it by name
+
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
+from yt.utilities.logger import ytLogger as mylog
+
+import yt.utilities.physical_constants as physical_constants
+import yt.units as units
+from yt.units.yt_array import YTArray, YTQuantity
+
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
+
+from yt.data_objects.api import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    DatasetSeries, \
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
+
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
+
+from yt.frontends.stream.api import \
+    load_uniform_grid, load_amr_grids, \
+    load_particles, load_hexahedral_mesh, load_octree
+
+# For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
+GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
+TipsyStaticOutput = deprecated_class(TipsyDataset)
+
+# Now individual component imports from the visualization API
+from yt.visualization.api import \
+    PlotCollection, PlotCollectionInteractive, \
+    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
+    apply_colormap, scale_image, write_projection, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps, ProfilePlot, PhasePlot
+
+from yt.visualization.volume_rendering.api import \
+    off_axis_projection
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects
+
+from yt.convenience import \
+    load, simulation
+
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles, periodic_position
+
+

diff -r b3febc76f9f150f776c54910f8e31ded755840cd -r 3ff631fd1ffb8ef3bd651b79c48a5dbed895a5b1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -20,9 +20,8 @@
 # ALL IMPORTS GO HERE
 #
 
-# First module imports
-import numpy as np # For modern purposes
-import numpy # In case anyone wishes to use it by name
+import os
+from yt import *
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
@@ -33,27 +32,7 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-from yt.funcs import \
-    iterable, \
-    get_memory_usage, \
-    print_tb, \
-    rootonly, \
-    insert_ipython, \
-    get_pbar, \
-    only_on_root, \
-    is_root, \
-    get_version_stack, \
-    get_yt_supp, \
-    get_yt_version, \
-    parallel_profile, \
-    enable_plugins, \
-    memory_checker, \
-    deprecated_class
-from yt.utilities.logger import ytLogger as mylog
 from yt.config import ytcfg, ytcfg_defaults
-import yt.utilities.physical_constants as physical_constants
-import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
 
 from yt.utilities.logger import level as __level
 if __level >= int(ytcfg_defaults["loglevel"]):
@@ -61,62 +40,6 @@
     mylog.debug("Turning off NumPy error reporting")
     np.seterr(all = 'ignore')
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
-
-from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    DatasetSeries, \
-    ImageArray, particle_filter, create_profile, \
-    Profile1D, Profile2D, Profile3D
-
-from yt.frontends.api import _frontend_container
-frontends = _frontend_container()
-
-from yt.frontends.stream.api import \
-    load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh, load_octree
-
-# For backwards compatibility
-GadgetDataset = frontends.sph.GadgetDataset
-GadgetStaticOutput = deprecated_class(GadgetDataset)
-TipsyDataset = frontends.sph.TipsyDataset
-TipsyStaticOutput = deprecated_class(TipsyDataset)
-
-# Now individual component imports from the visualization API
-from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, \
-    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
-    ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
-
-from yt.visualization.volume_rendering.api import \
-    off_axis_projection
-
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects
-
-from yt.convenience import \
-    load, simulation
-
-# Import some helpful math utilities
-from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position
-
-
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
 # that get used every time that they don't have to *define* every time.


https://bitbucket.org/yt_analysis/yt/commits/cdf62d7c654a/
Changeset:   cdf62d7c654a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 15:19:18
Summary:     Fixing setup.py from sed mistake.
Affected #:  1 file

diff -r 3ff631fd1ffb8ef3bd651b79c48a5dbed895a5b1 -r cdf62d7c654ac31c0d0e23f6c6f42210574e48d0 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -7,7 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('index_subset', parent_package, top_path)
+    config = Configuration('hierarchy_subset', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


https://bitbucket.org/yt_analysis/yt/commits/6e7a2fa51b9a/
Changeset:   6e7a2fa51b9a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-18 15:32:38
Summary:     Fixing imports.
Affected #:  1 file

diff -r cdf62d7c654ac31c0d0e23f6c6f42210574e48d0 -r 6e7a2fa51b9a0a403ebcd1d30462abc42fcd7b4a yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -20,9 +20,9 @@
 from yt.testing import \
     fake_random_pf
 import numpy as np
-from yt.mods import ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.volume_rendering.api import \
-    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera
+    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
+    ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase
 


https://bitbucket.org/yt_analysis/yt/commits/3e9d2f3d956b/
Changeset:   3e9d2f3d956b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-20 12:38:20
Summary:     Merging from ytep0019 into autosph
Affected #:  9 files

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -74,25 +74,85 @@
 
 __version__ = "3.0-dev"
 
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys
-    from yt.config import ytcfg
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg.set("yt","suppressStreamLogging", 'True')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
+# First module imports
+import numpy as np # For modern purposes
+import numpy # In case anyone wishes to use it by name
+
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
+from yt.utilities.logger import ytLogger as mylog
+
+import yt.utilities.physical_constants as physical_constants
+import yt.units as units
+from yt.units.yt_array import YTArray, YTQuantity
+
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
+
+from yt.data_objects.api import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    DatasetSeries, \
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
+
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
+
+from yt.frontends.stream.api import \
+    load_uniform_grid, load_amr_grids, \
+    load_particles, load_hexahedral_mesh, load_octree
+
+# For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
+GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
+TipsyStaticOutput = deprecated_class(TipsyDataset)
+
+# Now individual component imports from the visualization API
+from yt.visualization.api import \
+    PlotCollection, PlotCollectionInteractive, \
+    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
+    apply_colormap, scale_image, write_projection, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps, ProfilePlot, PhasePlot
+
+from yt.visualization.volume_rendering.api import \
+    off_axis_projection
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects
+
+from yt.convenience import \
+    load, simulation
+
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles, periodic_position
+
+

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -7,7 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('index_subset', parent_package, top_path)
+    config = Configuration('hierarchy_subset', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -12,3 +12,29 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import sys, types, os, glob, cPickle, time, importlib
+
+_frontends = [
+    'art',
+    'artio',
+    'athena',
+    'boxlib',
+    #'chombo',
+    'enzo',
+    'fits',
+    'flash',
+    'gdf',
+    'halo_catalogs',
+    'moab',
+    #'pluto',
+    'ramses',
+    'sph',
+    'stream',
+]
+
+class _frontend_container:
+    def __init__(self):
+        for frontend in _frontends:
+            _mod = "yt.frontends.%s.api" % frontend
+            setattr(self, frontend, importlib.import_module(_mod))

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -725,3 +725,14 @@
         return cls(*args, **kwargs)
     return _func
     
+def enable_plugins():
+    from yt.config import ytcfg
+    my_plugin_name = ytcfg.get("yt","pluginfilename")
+    # We assume that it is with respect to the $HOME/.yt directory
+    if os.path.isfile(my_plugin_name):
+        _fn = my_plugin_name
+    else:
+        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+    if os.path.isfile(_fn):
+        mylog.info("Loading plugins from %s", _fn)
+        execfile(_fn)

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -20,11 +20,8 @@
 # ALL IMPORTS GO HERE
 #
 
-# First module imports
-import sys, types, os, glob, cPickle, time
-import numpy as na # For historical reasons
-import numpy as np # For modern purposes
-import numpy # In case anyone wishes to use it by name
+import os
+from yt import *
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
@@ -35,13 +32,7 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.performance_counters import yt_counters, time_function
 from yt.config import ytcfg, ytcfg_defaults
-import yt.utilities.physical_constants as physical_constants
-import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
 
 from yt.utilities.logger import level as __level
 if __level >= int(ytcfg_defaults["loglevel"]):
@@ -49,134 +40,6 @@
     mylog.debug("Turning off NumPy error reporting")
     np.seterr(all = 'ignore')
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
-
-from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    data_object_registry, \
-    DatasetSeries, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile, \
-    Profile1D, Profile2D, Profile3D
-
-from yt.frontends.enzo.api import \
-    EnzoDataset, EnzoDatasetInMemory, \
-    EnzoSimulation, EnzoFieldInfo, add_enzo_field
-
-# Boxlib stuff
-from yt.frontends.boxlib.api import \
-    BoxlibDataset
-
-# Orion stuff
-#from yt.frontends.boxlib.api import \
-#    OrionDataset, OrionFieldInfo, add_orion_field
-
-# Maestro stuff
-#from yt.frontends.boxlib.api import \
-#    MaestroDataset
-
-# Castro stuff
-#from yt.frontends.boxlib.api import \
-#    CastroDataset
-
-from yt.frontends.flash.api import \
-    FLASHDataset, FLASHFieldInfo
-
-from yt.frontends.artio.api import \
-    ARTIODataset, ARTIOFieldInfo
-
-from yt.frontends.ramses.api import \
-    RAMSESDataset, RAMSESFieldInfo
-
-from yt.frontends.halo_catalogs.api import \
-    HaloCatalogDataset, HaloCatalogFieldInfo, \
-    RockstarDataset, RockstarFieldInfo
-
-#from yt.frontends.chombo.api import \
-#    ChomboDataset, ChomboFieldInfo, add_chombo_field
-
-from yt.frontends.gdf.api import \
-    GDFDataset, GDFFieldInfo, add_gdf_field
-
-from yt.frontends.moab.api import \
-    MoabHex8Dataset, MoabFieldInfo, \
-    PyneMoabHex8Dataset, PyneFieldInfo
-
-from yt.frontends.athena.api import \
-    AthenaDataset, AthenaFieldInfo
-
-from yt.frontends.art.api import \
-    ARTDataset, ARTFieldInfo
-
-#from yt.frontends.pluto.api import \
-#     PlutoDataset, PlutoFieldInfo, add_pluto_field
-
-from yt.frontends.stream.api import \
-    StreamDataset, \
-    StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh, load_octree
-
-from yt.frontends.sph.api import \
-    OWLSDataset, SPHFieldInfo, \
-    GadgetDataset, GadgetHDF5Dataset, \
-    TipsyDataset
-
-# For backwards compatibility
-GadgetStaticOutput = deprecated_class(GadgetDataset)
-TipsyStaticOutput = deprecated_class(TipsyDataset)
-
-#from yt.analysis_modules.list_modules import \
-#    get_available_modules, amods
-#available_analysis_modules = get_available_modules()
-
-from yt.frontends.fits.api import \
-    FITSDataset, FITSFieldInfo
-
-# Import our analysis modules
-from yt.analysis_modules.halo_finding.api import \
-    HaloFinder
-
-from yt.utilities.definitions import \
-    axis_names, x_dict, y_dict, inv_axis_names
-
-# Now individual component imports from the visualization API
-from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    callback_registry, write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, \
-    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
-    ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
-
-from yt.visualization.volume_rendering.api import \
-    ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    HomogenizedVolume, Camera, off_axis_projection, MosaicFisheyeCamera
-
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects
-
-for name, cls in callback_registry.items():
-    exec("%s = cls" % name)
-
-from yt.convenience import \
-    load, projload, simulation
-
-# Import some helpful math utilities
-from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position
-
-
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
 # that get used every time that they don't have to *define* every time.
@@ -184,12 +47,4 @@
 # Unfortunately, for now, I think the easiest and simplest way of doing
 # this is also the most dangerous way.
 if ytcfg.getboolean("yt","loadfieldplugins"):
-    my_plugin_name = ytcfg.get("yt","pluginfilename")
-    # We assume that it is with respect to the $HOME/.yt directory
-    if os.path.isfile(my_plugin_name):
-        _fn = my_plugin_name
-    else:
-        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
-    if os.path.isfile(_fn):
-        mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+    enable_plugins()

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -47,6 +47,12 @@
             if ytcfg.getboolean("yt","LogFile"):
                 ytcfg["yt","LogFile"] = "False"
                 yt.utilities.logger.disable_file_logging()
+        # Now we have to turn on the parallelism from the perspective of the
+        # parallel_analysis_interface
+        from yt.utilities.parallel_tools.parallel_analysis_interface import \
+            enable_parallelism
+
+        enable_parallelism()
     return parallel_capable
 
 # This fallback is for Paraview:

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -47,3 +47,26 @@
     from .framework import AnswerTesting
 except ImportError:
     raise
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys, yt
+    from yt.funcs import mylog
+    orig_level = mylog.getEffectiveLevel()
+    mylog.setLevel(50)
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(yt.__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        mylog.setLevel(orig_level)

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -33,8 +33,25 @@
 
 parallel_capable = ytcfg.getboolean("yt", "__parallel")
 
+dtype_names = dict(
+        float32 = "MPI.FLOAT",
+        float64 = "MPI.DOUBLE",
+        int32   = "MPI.INT",
+        int64   = "MPI.LONG",
+        c       = "MPI.CHAR",
+)
+op_names = dict(
+        sum = "MPI.SUM",
+        min = "MPI.MIN",
+        max = "MPI.MAX"
+)
+
 # Set up translation table and import things
-if parallel_capable:
+
+def enable_parallelism():
+    global parallel_capable
+    parallel_capable = ytcfg.getboolean("yt", "__parallel")
+    if not parallel_capable: return False
     from mpi4py import MPI
     yt.utilities.logger.uncolorize_logging()
     # Even though the uncolorize function already resets the format string,
@@ -48,32 +65,18 @@
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")
-    dtype_names = dict(
+    dtype_names.update(dict(
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
             int32   = MPI.INT,
             int64   = MPI.LONG,
             c       = MPI.CHAR,
-    )
-    op_names = dict(
+    ))
+    op_names.update(dict(
         sum = MPI.SUM,
         min = MPI.MIN,
         max = MPI.MAX
-    )
-
-else:
-    dtype_names = dict(
-            float32 = "MPI.FLOAT",
-            float64 = "MPI.DOUBLE",
-            int32   = "MPI.INT",
-            int64   = "MPI.LONG",
-            c       = "MPI.CHAR",
-    )
-    op_names = dict(
-            sum = "MPI.SUM",
-            min = "MPI.MIN",
-            max = "MPI.MAX"
-    )
+    ))
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.

diff -r f29080ca4f88070f75ff2da01db7255352d6ef43 -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -11,6 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import numpy as np
+from itertools import izip
 
 import matplotlib
 import matplotlib.colors as cc
@@ -82,9 +83,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
-         'green':zip(_vs,_kamae_grn,_kamae_grn),
-         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':izip(_vs,_kamae_red,_kamae_red),
+         'green':izip(_vs,_kamae_grn,_kamae_grn),
+         'blue':izip(_vs,_kamae_blu,_kamae_blu)}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -147,9 +148,9 @@
 _vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': zip(_vs,v[0],v[0]),
-                  'green': zip(_vs,v[1],v[1]),
-                  'blue': zip(_vs,v[2],v[2]) }
+        cdict = { 'red': izip(_vs,v[0],v[0]),
+                  'green': izip(_vs,v[1],v[1]),
+                  'blue': izip(_vs,v[2],v[2]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):


https://bitbucket.org/yt_analysis/yt/commits/cb7b81ea07aa/
Changeset:   cb7b81ea07aa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-20 12:48:34
Summary:     Merging with the fixed ytep0019 bookmark
Affected #:  1 file

diff -r 3e9d2f3d956b5f272185e4e55b128486dcd2e22a -r cb7b81ea07aacd278858bf82a99af266d073f057 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -20,9 +20,9 @@
 from yt.testing import \
     fake_random_pf
 import numpy as np
-from yt.mods import ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.volume_rendering.api import \
-    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera
+    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
+    ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase
 


https://bitbucket.org/yt_analysis/yt/commits/92f0d46ff1a2/
Changeset:   92f0d46ff1a2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-20 12:50:04
Summary:     Merging with mainline experimental
Affected #:  6 files

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -15,22 +15,56 @@
 
 import numpy as np
 
-from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    NullFunc, \
-    TranslationFunc, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-from yt.fields.particle_fields import \
-    particle_deposition_functions, \
-    particle_vector_functions, \
-    standard_particle_fields
 from yt.utilities.physical_constants import \
     mh, \
-    mass_sun_cgs
+    mass_sun_cgs, \
+    amu_cgs
 from yt.funcs import *
+from yt.utilities.chemical_formulas import \
+    ChemicalFormula
 
+# See YTEP-0003 for details, but we want to ensure these fields are all
+# populated:
+#
+#   * _mass
+#   * _density
+#   * _fraction
+#   * _number_density
+#
+
+def _create_fraction_func(ftype, species):
+    def _frac(field, data):
+        return data[ftype, "%s_density" % species] \
+             / data[ftype, "density"]
+    return _frac
+
+def _create_mass_func(ftype, species):
+    def _mass(field, data):
+        return data[ftype, "%s_density" % species] \
+             * data["index", "cell_volume"]
+    return _mass
+
+def _create_number_density_func(ftype, species):
+    formula = ChemicalFormula(species)
+    weight = formula.weight # This is in AMU
+    weight *= amu_cgs
+    def _number_density(field, data):
+        return data[ftype, "%s_density" % species] \
+             / amu_cgs
+    return _number_density
+
+def add_species_field_by_density(registry, ftype, species):
+    """
+    This takes a field registry, a fluid type, and a species name and then
+    adds the other fluids based on that.  This assumes that the field
+    "SPECIES_density" already exists and refers to mass density.
+    """
+    registry.add_field((ftype, "%s_fraction" % species), 
+                        function = _create_fraction_func(ftype, species),
+                        units = "")
+    registry.add_field((ftype, "%s_mass" % species),
+                        function = _create_mass_func(ftype, species),
+                        units = "g")
+    registry.add_field((ftype, "%s_number_density" % species),
+                        function = _create_number_density_func(ftype, species),
+                        units = "cm**-3")
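
A call-pattern sketch (mirroring the Enzo frontend change below): given a registry already carrying a ("gas", "<species>_density") mass density, the helper fills in the fraction, mass, and number-density companions named in the YTEP-0003 comment above. Note that _create_number_density_func as committed divides by amu_cgs alone and never uses the computed species weight, so the weight calculation appears to be dead code here.

    from yt.fields.species_fields import add_species_field_by_density
    # registry stands in for a FieldInfoContainer instance (illustrative);
    # assumes ("gas", "H_p1_density") exists, then adds
    # H_p1_fraction, H_p1_mass, and H_p1_number_density.
    add_species_field_by_density(registry, "gas", "H_p1")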

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -20,9 +20,10 @@
     FieldInfoContainer
 from yt.units.yt_array import \
     YTArray
-
+from yt.fields.species_fields import \
+    add_species_field_by_density
 from yt.utilities.physical_constants import \
-    mh, \
+    mh, me, mp, \
     mass_sun_cgs
 
 b_units = "code_magnetic"
@@ -30,22 +31,20 @@
 rho_units = "code_mass / code_length**3"
 vel_units = "code_velocity"
 
-known_species_masses = dict(
-  (sp, mh * v) for sp, v in [
-                ("HI", 1.0),
-                ("HII", 1.0),
-                ("Electron", 1.0),
-                ("HeI", 4.0),
-                ("HeII", 4.0),
-                ("HeIII", 4.0),
-                ("H2I", 2.0),
-                ("H2II", 2.0),
-                ("HM", 1.0),
-                ("DI", 2.0),
-                ("DII", 2.0),
-                ("HDI", 3.0),
-    ])
-
+known_species_names = {
+    'HI'      : 'H',
+    'HII'     : 'H_p1',
+    'HeI'     : 'He',
+    'HeII'    : 'He_p1',
+    'HeIII'   : 'He_p2',
+    'H2I'     : 'H2',
+    'H2II'    : 'H2_p1',
+    'HM'      : 'H_m1',
+    'DI'      : 'D',
+    'DII'     : 'D_p1',
+    'HD'      : 'HD',
+    'Electron': 'El'
+}
 
 class EnzoFieldInfo(FieldInfoContainer):
     known_other_fields = (
@@ -72,6 +71,8 @@
         ("Density", (rho_units, ["density"], None)),
         ("Metal_Density", (rho_units, ["metal_density"], None)),
         ("SN_Colour", (rho_units, [], None)),
+        # Note: we do not alias Electron_Density to anything
+        ("Electron_Density", (rho_units, [], None)),
     )
 
     known_particle_fields = (
@@ -117,46 +118,24 @@
         self.add_output_field(("enzo", "%s_Density" % species),
                            take_log=True,
                            units="code_mass/code_length**3")
-        self.alias(("gas", "%s_density" % species),
+        yt_name = known_species_names[species]
+        self.alias(("gas", "%s_density" % yt_name),
                    ("enzo", "%s_Density" % species))
-        def _species_mass(field, data):
-            return data["gas", "%s_density" % species] \
-                 * data["cell_volume"]
-        self.add_field(("gas", "%s_mass" % species),
-                           function=_species_mass,
-                           units = "g")
-        def _species_fraction(field, data):
-            return data["gas", "%s_density" % species] \
-                 / data["gas","density"]
-        self.add_field(("gas", "%s_fraction" % species),
-                           function=_species_fraction,
-                           units = "")
-        def _species_number_density(field, data):
-            return data["gas", "%s_density" % species] \
-                / known_species_masses[species]
-        self.add_field(("gas", "%s_number_density" % species),
-                           function=_species_number_density,
-                           units = "1/cm**3")
+        add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
         species_names = [fn.rsplit("_Density")[0] for ft, fn in 
                          self.field_list if fn.endswith("_Density")]
         species_names = [sp for sp in species_names
-                         if sp in known_species_masses]
+                         if sp in known_species_names]
+        def _electron_density(field, data):
+            return data["Electron_Density"] * (me/mp)
+        self.add_field(("gas", "El_density"),
+                       function = _electron_density,
+                       units = "g/cm**3")
         for sp in species_names:
             self.add_species_field(sp)
-        def _number_density(_sp_list, masses):
-            def _num_dens_func(field, data):
-                num = data.pf.arr(np.zeros_like(data["density"], np.float64),
-                                  "1/cm**3")
-                for sp in _sp_list:
-                    num += data["%s_density" % sp] / masses[sp]
-                return num
-            return _num_dens_func
-        func = _number_density(species_names, known_species_masses)
-        self.add_field(("gas", "number_density"),
-                           function = func,
-                           units = "1 / cm**3")
+
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/utilities/chemical_formulas.py
--- /dev/null
+++ b/yt/utilities/chemical_formulas.py
@@ -0,0 +1,44 @@
+"""
+Very basic chemical formula parser.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import string
+import re
+from .periodic_table import periodic_table
+
+class ChemicalFormula:
+    def __init__(self, formula_string):
+        # See YTEP-0003 for information on the format.
+        self.formula_string = formula_string
+        self.elements = []
+        if "_" in self.formula_string:
+            molecule, ionization = self.formula_string.split("_")
+            if ionization[0] == "p":
+                charge = int(ionization[1:])
+            elif ionization[0] == "m":
+                charge = -int(ionization[1:])
+            else:
+                raise NotImplementedError
+        else:
+            molecule = self.formula_string
+            charge = 0
+        self.charge = charge
+        for element, count in re.findall(r'([A-Z][a-z]*)(\d*)', molecule):
+            if count == '': count = 1
+            self.elements.append((periodic_table[element], int(count)))
+        self.weight = sum(n * e.weight for e, n in self.elements)
+
+    def __repr__(self):
+        return self.formula_string
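
A worked example of the YTEP-0003 naming this parser accepts: the underscore suffix carries the charge ("p" positive, "m" negative) and digits after each element symbol give its count.

    from yt.utilities.chemical_formulas import ChemicalFormula
    f = ChemicalFormula("H2O_p1")
    print f.charge                                # 1
    print [(e.symbol, n) for e, n in f.elements]  # [('H', 2), ('O', 1)]
    print f.weight                                # 2*1.00794 + 15.9994 = 18.01528 amu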

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/utilities/periodic_table.py
--- /dev/null
+++ b/yt/utilities/periodic_table.py
@@ -0,0 +1,178 @@
+"""
+A simple periodic table.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import numbers
+import types
+
+_elements = (
+    (1, 1.0079400000, "Hydrogen", "H"),
+    (2, 4.0026020000, "Helium", "He"),
+    (3, 6.9410000000, "Lithium", "Li"),
+    (4, 9.0121820000, "Beryllium", "Be"),
+    (5, 10.8110000000, "Boron", "B"),
+    (6, 12.0107000000, "Carbon", "C"),
+    (7, 14.0067000000, "Nitrogen", "N"),
+    (8, 15.9994000000, "Oxygen", "O"),
+    (9, 18.9994000000, "Fluorine", "F"),
+    (10, 20.1797000000, "Neon", "Ne"),
+    (11, 22.9897692800, "Sodium", "Na"),
+    (12, 24.3050000000, "Magnesium", "Mg"),
+    (13, 26.9815386000, "Aluminium", "Al"),
+    (14, 28.0855000000, "Silicon", "Si"),
+    (15, 30.9737620000, "Phosphorus", "P"),
+    (16, 32.0650000000, "Sulphur", "S"),
+    (17, 35.4530000000, "Chlorine", "Cl"),
+    (18, 39.9480000000, "Argon", "Ar"),
+    (19, 39.0983000000, "Potassium", "K"),
+    (20, 40.0780000000, "Calcium", "Ca"),
+    (21, 44.9559120000, "Scandium", "Sc"),
+    (22, 47.8670000000, "Titanium", "Ti"),
+    (23, 50.9415000000, "Vanadium", "V"),
+    (24, 51.9961000000, "Chromium", "Cr"),
+    (25, 54.9380450000, "Manganese", "Mn"),
+    (26, 55.8450000000, "Iron", "Fe"),
+    (27, 58.9331950000, "Cobalt", "Co"),
+    (28, 58.6934000000, "Nickel", "Ni"),
+    (29, 63.5460000000, "Copper", "Cu"),
+    (30, 65.3800000000, "Zinc", "Zn"),
+    (31, 69.7230000000, "Gallium", "Ga"),
+    (32, 72.6400000000, "Germanium", "Ge"),
+    (33, 74.9216000000, "Arsenic", "As"),
+    (34, 78.9600000000, "Selenium", "Se"),
+    (35, 79.9040000000, "Bromine", "Br"),
+    (36, 83.7980000000, "Krypton", "Kr"),
+    (37, 85.4678000000, "Rubidium", "Rb"),
+    (38, 87.6200000000, "Strontium", "Sr"),
+    (39, 88.9058500000, "Yttrium", "Y"),
+    (40, 91.2240000000, "Zirkonium", "Zr"),
+    (41, 92.9063800000, "Niobium", "Nb"),
+    (42, 95.9600000000, "Molybdaenum", "Mo"),
+    (43, 98.0000000000, "Technetium", "Tc"),
+    (44, 101.0700000000, "Ruthenium", "Ru"),
+    (45, 102.9055000000, "Rhodium", "Rh"),
+    (46, 106.4200000000, "Palladium", "Pd"),
+    (47, 107.8682000000, "Silver", "Ag"),
+    (48, 112.4110000000, "Cadmium", "Cd"),
+    (49, 114.8180000000, "Indium", "In"),
+    (50, 118.7100000000, "Tin", "Sn"),
+    (51, 121.7600000000, "Antimony", "Sb"),
+    (52, 127.6000000000, "Tellurium", "Te"),
+    (53, 126.9044700000, "Iodine", "I"),
+    (54, 131.2930000000, "Xenon", "Xe"),
+    (55, 132.9054519000, "Cesium", "Cs"),
+    (56, 137.3270000000, "Barium", "Ba"),
+    (57, 138.9054700000, "Lanthanum", "La"),
+    (58, 140.1160000000, "Cerium", "Ce"),
+    (59, 140.9076500000, "Praseodymium", "Pr"),
+    (60, 144.2420000000, "Neodymium", "Nd"),
+    (61, 145.0000000000, "Promethium", "Pm"),
+    (62, 150.3600000000, "Samarium", "Sm"),
+    (63, 151.9640000000, "Europium", "Eu"),
+    (64, 157.2500000000, "Gadolinium", "Gd"),
+    (65, 158.9253500000, "Terbium", "Tb"),
+    (66, 162.5001000000, "Dysprosium", "Dy"),
+    (67, 164.9303200000, "Holmium", "Ho"),
+    (68, 167.2590000000, "Erbium", "Er"),
+    (69, 168.9342100000, "Thulium", "Tm"),
+    (70, 173.0540000000, "Ytterbium", "Yb"),
+    (71, 174.9668000000, "Lutetium", "Lu"),
+    (72, 178.4900000000, "Hafnium", "Hf"),
+    (73, 180.9478800000, "Tantalum", "Ta"),
+    (74, 183.8400000000, "Tungsten", "W"),
+    (75, 186.2070000000, "Rhenium", "Re"),
+    (76, 190.2300000000, "Osmium", "Os"),
+    (77, 192.2170000000, "Iridium", "Ir"),
+    (78, 192.0840000000, "Platinum", "Pt"),
+    (79, 196.9665690000, "Gold", "Au"),
+    (80, 200.5900000000, "Hydrargyrum", "Hg"),
+    (81, 204.3833000000, "Thallium", "Tl"),
+    (82, 207.2000000000, "Lead", "Pb"),
+    (83, 208.9804010000, "Bismuth", "Bi"),
+    (84, 210.0000000000, "Polonium", "Po"),
+    (85, 210.0000000000, "Astatine", "At"),
+    (86, 220.0000000000, "Radon", "Rn"),
+    (87, 223.0000000000, "Francium", "Fr"),
+    (88, 226.0000000000, "Radium", "Ra"),
+    (89, 227.0000000000, "Actinium", "Ac"),
+    (90, 232.0380600000, "Thorium", "Th"),
+    (91, 231.0358800000, "Protactinium", "Pa"),
+    (92, 238.0289100000, "Uranium", "U"),
+    (93, 237.0000000000, "Neptunium", "Np"),
+    (94, 244.0000000000, "Plutonium", "Pu"),
+    (95, 243.0000000000, "Americium", "Am"),
+    (96, 247.0000000000, "Curium", "Cm"),
+    (97, 247.0000000000, "Berkelium", "Bk"),
+    (98, 251.0000000000, "Californium", "Cf"),
+    (99, 252.0000000000, "Einsteinium", "Es"),
+    (100, 257.0000000000, "Fermium", "Fm"),
+    (101, 258.0000000000, "Mendelevium", "Md"),
+    (102, 259.0000000000, "Nobelium", "No"),
+    (103, 262.0000000000, "Lawrencium", "Lr"),
+    (104, 261.0000000000, "Rutherfordium", "Rf"),
+    (105, 262.0000000000, "Dubnium", "Db"),
+    (106, 266.0000000000, "Seaborgium", "Sg"),
+    (107, 264.0000000000, "Bohrium", "Bh"),
+    (108, 277.0000000000, "Hassium", "Hs"),
+    (109, 268.0000000000, "Meitnerium", "Mt"),
+    (110, 271.0000000000, "Ununnilium", "Ds"),
+    (111, 272.0000000000, "Unununium", "Rg"),
+    (112, 285.0000000000, "Ununbium", "Uub"),
+    (113, 284.0000000000, "Ununtrium", "Uut"),
+    (114, 289.0000000000, "Ununquadium", "Uuq"),
+    (115, 288.0000000000, "Ununpentium", "Uup"),
+    (116, 292.0000000000, "Ununhexium", "Uuh"),
+    (118, 294.0000000000, "Ununoctium", "Uuo"),
+    # Now some special cases that are *not* elements
+    (-1, 2.014102, "Deuterium", "D"),
+    (-1, 0.00054858, "Electron", "El"),
+)
+
+class Element:
+    def __init__(self, num, weight, name, symbol):
+        self.num = num
+        self.weight = weight
+        self.name = name
+        self.symbol = symbol
+
+    def __repr__(self):
+        return "Element: %s (%s)" % (self.symbol, self.name)
+
+class PeriodicTable:
+    def __init__(self):
+        self.elements_by_number = {}
+        self.elements_by_name = {}
+        self.elements_by_symbol = {}
+        for num, weight, name, symbol in _elements:
+            e = Element(num, weight, name, symbol)
+            self.elements_by_number[num] = e
+            self.elements_by_name[name] = e
+            self.elements_by_symbol[symbol] = e
+
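+    # Key dispatch: numbers look up by atomic number; strings of one or two
+    # characters (or three characters starting with "U", e.g. "Uuq") are
+    # treated as symbols; any other string is treated as an element name.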
+    def __getitem__(self, key):
+        if isinstance(key, (np.number, numbers.Number)):
+            d = self.elements_by_number
+        elif isinstance(key, types.StringTypes):
+            if len(key) <= 2:
+                d = self.elements_by_symbol
+            elif len(key) == 3 and key[0] == "U":
+                d = self.elements_by_symbol
+            else:
+                d = self.elements_by_name
+        else:
+            raise KeyError(key)
+        return d[key]
+
+periodic_table = PeriodicTable()
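+
+# A minimal usage sketch: lookups work interchangeably by atomic number,
+# element name, or symbol, and all three return the same Element instance.
+# For example, periodic_table[36], periodic_table["Krypton"], and
+# periodic_table["Kr"] all yield "Element: Kr (Krypton)".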

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/utilities/tests/test_chemical_formulas.py
--- /dev/null
+++ b/yt/utilities/tests/test_chemical_formulas.py
@@ -0,0 +1,23 @@
+from yt.testing import *
+from yt.utilities.chemical_formulas import ChemicalFormula
+from yt.utilities.periodic_table import periodic_table
+
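+# Each entry is (formula string, ((symbol, count), ...), net charge); the
+# _p1/_m1 suffixes in the formula encode charges of +1 and -1, respectively.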
+_molecules = (
+    ("H2O_p1", (("H", 2), ("O", 1)), 1),
+    ("H2O_m1", (("H", 2), ("O", 1)), -1),
+    ("H2O", (("H", 2), ("O", 1)), 0),
+    ("H2SO4", (("H", 2), ("S", 1), ("O", 4)), 0),
+    # Now a harder one
+    ("UuoMtUuq3", (("Uuo", 1), ("Mt", 1), ("Uuq", 3)), 0)
+)
+
+def test_formulas():
+    for formula, components, charge in _molecules:
+        f = ChemicalFormula(formula)
+        w = sum(n * periodic_table[e].weight for e, n in components)
+        yield assert_equal, f.charge, charge
+        yield assert_equal, f.weight, w
+        for (n, c1), (e, c2) in zip(components, f.elements):
+            yield assert_equal, n, e.symbol
+            yield assert_equal, c1, c2

diff -r cb7b81ea07aacd278858bf82a99af266d073f057 -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 yt/utilities/tests/test_periodic_table.py
--- /dev/null
+++ b/yt/utilities/tests/test_periodic_table.py
@@ -0,0 +1,14 @@
+from yt.testing import *
+from yt.utilities.periodic_table import _elements, periodic_table
+
+def test_element_accuracy():
+    for num, w, name, sym in _elements:
+        e0 = periodic_table[num]
+        e1 = periodic_table[name]
+        e2 = periodic_table[sym]
+        yield assert_equal, id(e0), id(e1)
+        yield assert_equal, id(e0), id(e2)
+        yield assert_equal, e0.num, num
+        yield assert_equal, e0.weight, w
+        yield assert_equal, e0.name, name
+        yield assert_equal, e0.symbol, sym


https://bitbucket.org/yt_analysis/yt/commits/219051a16966/
Changeset:   219051a16966
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-20 13:15:53
Summary:     Another merge
Affected #:  1 file

diff -r 92f0d46ff1a28ed1b0cec1d4226bdbba7ebd2a13 -r 219051a16966a33457f7fa225bae37dc6ff30af6 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -296,7 +296,7 @@
         # Here we get a copy of the file, which we skip through and read the
         # bits we want.
         oct_handler = self.oct_handler
-        all_fields = self.domain.pf.h.fluid_field_list
+        all_fields = self.domain.pf.index.fluid_field_list
         fields = [f for ft, f in fields]
         tr = {}
         cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)


https://bitbucket.org/yt_analysis/yt/commits/031ac7fd776e/
Changeset:   031ac7fd776e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-23 22:06:51
Summary:     Merging from experimental bookmark
Affected #:  14 files

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -36,6 +36,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
+yt/utilities/lib/origami.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b CITATION
--- a/CITATION
+++ b/CITATION
@@ -29,3 +29,28 @@
    adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
   adsnote = {Provided by the SAO/NASA Astrophysics Data System}
 }
+
+yt can also utilize other functionality.  If you use ORIGAMI, we ask
+that you please cite the ORIGAMI paper:
+
+@ARTICLE{2012ApJ...754..126F,
+   author = {{Falck}, B.~L. and {Neyrinck}, M.~C. and {Szalay}, A.~S.},
+    title = "{ORIGAMI: Delineating Halos Using Phase-space Folds}",
+  journal = {\apj},
+archivePrefix = "arXiv",
+   eprint = {1201.2353},
+ primaryClass = "astro-ph.CO",
+ keywords = {dark matter, galaxies: halos, large-scale structure of universe, methods: numerical},
+     year = 2012,
+    month = aug,
+   volume = 754,
+      eid = {126},
+    pages = {126},
+      doi = {10.1088/0004-637X/754/2/126},
+   adsurl = {http://adsabs.harvard.edu/abs/2012ApJ...754..126F},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+The main homepage for ORIGAMI can be found here:
+
+http://icg.port.ac.uk/~falckb/origami.html

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -138,7 +138,7 @@
     # Create evaluated version and save it to the dest path.
     # Always use --pylab so figures appear inline
     # perhaps this is questionable?
-    nb_runner = NotebookRunner(nb_in=nb_path, pylab=True)
+    nb_runner = NotebookRunner(nb_path, pylab=False)
     nb_runner.run_notebook(skip_exceptions=skip_exceptions)
     if dest_path is None:
         dest_path = 'temp_evaluated.ipynb'

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b doc/source/examining/index.rst
--- a/doc/source/examining/index.rst
+++ b/doc/source/examining/index.rst
@@ -6,6 +6,6 @@
 .. toctree::
    :maxdepth: 2
 
-   supported_frontends_data
+   loading_data
    generic_array_data
    low_level_inspection

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b doc/source/examining/loading_data.rst
--- /dev/null
+++ b/doc/source/examining/loading_data.rst
@@ -0,0 +1,601 @@
+.. _loading-data:
+
+Loading Data
+============
+
+This section contains information on how to load data into ``yt``, as well as
+some important caveats about different data formats.
+
+.. _loading-enzo-data:
+
+Enzo Data
+---------
+
+Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
+dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
+it the parameter file name.  This would be the name of the output file, and it
+contains no extension.  For instance, if you have the following files:
+
+.. code-block:: none
+
+   DD0010/
+   DD0010/data0010
+   DD0010/data0010.index
+   DD0010/data0010.cpu0000
+   DD0010/data0010.cpu0001
+   DD0010/data0010.cpu0002
+   DD0010/data0010.cpu0003
+
+You would feed the ``load`` command the filename ``DD0010/data0010`` as
+mentioned.
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("DD0010/data0010")
+
+.. rubric:: Caveats
+
+* There are no major caveats for Enzo usage.
+* Units should be correct, if you utilize standard unit-setting routines.  yt
+  will notify you if it cannot determine the units, although this
+  notification will be passive.
+* 2D and 1D data are supported, but the extraneous dimensions are set to be
+  of length 1.0 in "code length", which may produce strange results for volume
+  quantities.
+
+.. _loading-orion-data:
+
+Boxlib Data
+-----------
+
+yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
+Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
+Malone, and Matthew Turk.
+
+To load a Boxlib dataset, you can use the ``load`` command provided by
+``yt.mods`` and supply to it the directory name.  **You must also have the
+``inputs`` file in the base directory.**  For instance, if you were in a
+directory with the following files:
+
+.. code-block:: none
+
+   inputs
+   pltgmlcs5600/
+   pltgmlcs5600/Header
+   pltgmlcs5600/Level_0
+   pltgmlcs5600/Level_0/Cell_H
+   pltgmlcs5600/Level_1
+   pltgmlcs5600/Level_1/Cell_H
+   pltgmlcs5600/Level_2
+   pltgmlcs5600/Level_2/Cell_H
+   pltgmlcs5600/Level_3
+   pltgmlcs5600/Level_3/Cell_H
+   pltgmlcs5600/Level_4
+   pltgmlcs5600/Level_4/Cell_H
+
+You would feed it the filename ``pltgmlcs5600``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("pltgmlcs5600")
+
+.. _loading-flash-data:
+
+FLASH Data
+----------
+
+FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
+FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
+supply to it the file name of a plot file or checkpoint file.  Particle files
+are not currently directly loadable by themselves, as they typically lack grid
+information.  For instance, if you were in a directory with the following
+files:
+
+.. code-block:: none
+
+   cosmoSim_coolhdf5_chk_0026
+
+You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("cosmoSim_coolhdf5_chk_0026")
+
+If you have a FLASH particle file that was created at the same time as
+a plotfile or checkpoint file (therefore having particle data
+consistent with the grid structure of the latter), its data may be loaded with the
+``particle_filename`` optional argument:
+
+.. code-block:: python
+
+    from yt.mods import *
+    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
+
+.. rubric:: Caveats
+
+* Please be careful that the units are correctly utilized; yt assumes cgs.
+
+.. _loading-ramses-data:
+
+RAMSES Data
+-----------
+
+In yt-3.0, RAMSES data is fully supported.  If you are interested in taking a
+development or stewardship role, please contact the yt-dev mailing list.  To
+load a RAMSES dataset, you can use the ``load`` command provided by ``yt.mods``
+and supply to it the ``info*.txt`` filename.  For instance, if you were in a
+directory with the following files:
+
+.. code-block:: none
+
+   output_00007
+   output_00007/amr_00007.out00001
+   output_00007/grav_00007.out00001
+   output_00007/hydro_00007.out00001
+   output_00007/info_00007.txt
+   output_00007/part_00007.out00001
+
+You would feed it the filename ``output_00007/info_00007.txt``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("output_00007/info_00007.txt")
+
+yt will attempt to guess the fields in the file.  You may also specify a list
+of fields by supplying the ``fields`` keyword in your call to ``load``.
+
+.. _loading-gadget-data:
+
+Gadget Data
+-----------
+
+yt has support for reading Gadget data in both raw binary and HDF5 formats.  It
+is able to access the particles as it would any other particle dataset, and it
+can apply smoothing kernels to the data to produce both quantitative analysis
+and visualization.
+
+Gadget data in HDF5 format can be loaded with the ``load`` command:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("snapshot_061.hdf5")
+
+However, yt cannot detect raw-binary Gadget data, and so you must specify the
+format as being Gadget:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = GadgetDataset("snapshot_061")
+
+.. _particle-bbox:
+
+Units and Bounding Boxes
+++++++++++++++++++++++++
+
+There are two additional pieces of information that may be needed.  If your
+simulation is cosmological, yt can often guess the bounding box and the units
+of the simulation.  However, for isolated simulations and for cosmological
+simulations with non-standard units, these must be supplied.  For example, if
+a length unit of 1.0 corresponds to a kiloparsec, you can supply this in the
+constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``, ``Mpccm/h``
+and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h`` (``cm`` for
+comoving here) are usable unit definitions.
+
+yt will attempt to use units for ``mass``, ``length`` and ``time`` as supplied
+in the argument ``unit_base``.  The ``bounding_box`` argument is a list of
+two-item tuples or lists that describe the left and right extents of the
+particles.
+
+.. code-block:: python
+
+   pf = GadgetDataset("snap_004",
+           unit_base = {'length': ('kpc', 1.0)},
+           bounding_box = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]])
+
+.. _particle-indexing-criteria:
+
+Indexing Criteria
++++++++++++++++++
+
+yt generates a global mesh index via an octree that governs the resolution of
+volume elements.  This is controlled by two parameters, ``n_ref`` and
+``over_refine_factor``, which are weak proxies for each other.  The first,
+``n_ref``, governs how many particles an oct may contain before it is refined
+into eight child octs.  Lower values mean higher resolution; the
+default is 64.  The second parameter, ``over_refine_factor``, governs how many
+cells are in a given oct; the default value of 1 corresponds to 8 cells.
+The number of cells in an oct is defined by the expression
+``2**(3*over_refine_factor)``.
+
+If you want higher resolution, it is recommended that you try reducing
+``n_ref`` to 32 or 16.
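+
+As a sketch (the snapshot name is a placeholder, and we assume ``load``
+forwards these keywords to the dataset constructor), both parameters can be
+supplied at load time:
+
+.. code-block:: python
+
+   pf = load("snapshot_061.hdf5", n_ref=32, over_refine_factor=2)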
+
+.. _gadget-field-spec:
+
+Field Specifications
+++++++++++++++++++++
+
+Binary Gadget outputs often have additional fields or particle types that are
+not part of the default Gadget distribution format.  These can be
+specified in the call to ``GadgetDataset`` by either supplying one of the
+sets of field specifications as a string or by supplying a field specification
+itself.  As an example, yt has built-in definitions for ``default`` (the
+default) and ``agora_unlv``.  Field specifications must be tuples, and must be
+of this format:
+
+.. code-block:: python
+
+   default = ( "Coordinates",
+               "Velocities",
+               "ParticleIDs",
+               "Mass",
+               ("InternalEnergy", "Gas"),
+               ("Density", "Gas"),
+               ("SmoothingLength", "Gas"),
+   )
+
+This is the default specification used by the Gadget frontend.  It means that
+the fields are, in order, Coordinates, Velocities, ParticleIDs, Mass, and the
+fields InternalEnergy, Density and SmoothingLength *only* for Gas particles.
+So for example, if you have defined a Metallicity field for the particle type
+Halo, which comes right after ParticleIDs in the file, you could define it like
+this:
+
+.. code-block:: python
+
+   my_field_def = ( "Coordinates",
+               "Velocities",
+               "ParticleIDs",
+               ("Metallicity", "Halo"),
+               "Mass",
+               ("InternalEnergy", "Gas"),
+               ("Density", "Gas"),
+               ("SmoothingLength", "Gas"),
+   )
+
+To save time, you can utilize the plugins file for yt and use it to add items
+to the dictionary where these definitions are stored.  You could do this like
+so:
+
+.. code-block:: python
+
+   from yt.frontends.sph.definitions import gadget_field_specs
+   gadget_field_specs["my_field_def"] = my_field_def
+
+Please also feel free to issue a pull request with any new field
+specifications, as we're happy to include them in the main distribution!
+
+.. _gadget-ptype-spec:
+
+Particle Type Definitions
++++++++++++++++++++++++++
+
+In some cases, research groups add new particle types or re-order them.  You
+can supply alternate particle types by using the keyword ``ptype_spec`` to the
+``GadgetDataset`` call.  The default for Gadget binary data is:
+
+.. code-block:: python
+
+    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
+
+You can specify alternate names, but note that this may cause problems with the
+field specification if none of the names match old names.
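+
+For example, a hypothetical re-ordering that places the star particles second
+might be supplied like this:
+
+.. code-block:: python
+
+   my_ptypes = ("Gas", "Stars", "Halo", "Disk", "Bulge", "Bndry")
+   pf = GadgetDataset("snapshot_061", ptype_spec=my_ptypes)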
+
+.. _gadget-header-spec:
+
+Header Specification
+++++++++++++++++++++
+
+If you have modified the header in your Gadget binary file, you can specify an
+alternate header specification with the keyword ``header_spec``.  This can
+either be a list of strings corresponding to individual header types known to
+yt, or it can be a combination of strings and header specifications.  The
+default header specification (found in ``yt/frontends/sph/definitions.py``) is:
+
+.. code-block:: python
+   
+    default      = (('Npart', 6, 'i'),
+                    ('Massarr', 6, 'd'),
+                    ('Time', 1, 'd'),
+                    ('Redshift', 1, 'd'),
+                    ('FlagSfr', 1, 'i'),
+                    ('FlagFeedback', 1, 'i'),
+                    ('Nall', 6, 'i'),
+                    ('FlagCooling', 1, 'i'),
+                    ('NumFiles', 1, 'i'),
+                    ('BoxSize', 1, 'd'),
+                    ('Omega0', 1, 'd'),
+                    ('OmegaLambda', 1, 'd'),
+                    ('HubbleParam', 1, 'd'),
+                    ('FlagAge', 1, 'i'),
+                    ('FlagMEtals', 1, 'i'),
+                    ('NallHW', 6, 'i'),
+                    ('unused', 16, 'i'))
+
+These items will all be accessible inside the object ``pf.parameters``, which
+is a dictionary.  You can add combinations of new items, specified in the same
+way, or alternately other types of headers.  The other string keys defined are
+``pad32``, ``pad64``, ``pad128``, and ``pad256``, each of which corresponds to
+an empty padding in bytes.  For example, if you have an additional 256 bytes of
+padding at the end, you can specify this with:
+
+.. code-block:: python
+
+   header_spec = ["default", "pad256"]
+
+This can then be supplied to the constructor.  Note that you can also do this
+manually, for instance with:
+
+.. code-block:: python
+
+   header_spec = ["default", (('some_value', 8, 'd'),
+                              ('another_value', 1, 'i'))]
+
+The letters correspond to data types from the Python struct module.  Please
+feel free to submit alternate header types to the main yt repository.
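+
+For reference, these letters are standard Python ``struct`` format codes, so
+you can check the byte sizes they imply directly:
+
+.. code-block:: python
+
+   import struct
+   struct.calcsize('i')  # typically 4: a 32-bit integer, as in ('FlagSfr', 1, 'i')
+   struct.calcsize('d')  # 8: a double-precision float, as in ('Time', 1, 'd')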
+
+.. _specifying-gadget-units:
+
+Specifying Units
+++++++++++++++++
+
+If you are running a cosmology simulation, yt will be able to guess the units
+with some reliability.  However, if you are not and you do not specify a
+parameter file, yt will not be able to guess them and will fall back on
+defaults: length in 1.0 Mpc/h (comoving), velocity in cm/s, and mass in 10^10
+Msun/h.  You can specify alternate units by supplying the ``unit_base`` keyword
+argument of this form:
+
+.. code-block:: python
+
+   unit_base = {'length': (1.0, 'cm'), 'mass': (1.0, 'g'), 'time': (1.0, 's')}
+
+yt will utilize length, mass and time to set up all other units.
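+
+As another sketch, assuming ``kpc`` and ``Msun`` are recognized unit names (as
+suggested by the defaults above), a kiloparsec/solar-mass unit system would be:
+
+.. code-block:: python
+
+   unit_base = {'length': (1.0, 'kpc'), 'mass': (1.0, 'Msun'), 'time': (1.0, 's')}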
+
+.. _loading-tipsy-data:
+
+Tipsy Data
+----------
+
+yt also supports loading Tipsy data.  Many of its characteristics are similar
+to how Gadget data is loaded; specifically, it shares its definition of
+indexing and mesh-identification with that described in
+:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
+has not yet implemented header specifications, field specifications, or
+particle type specifications.  *These are all excellent projects for new
+contributors!*
+
+Tipsy data cannot be automatically detected.  You can load it with a command
+similar to the following:
+
+.. code-block:: python
+
+    ds = TipsyDataset('test.00169',
+        parameter_file='test.param',
+        endian = '<',
+        domain_left_edge = domain_left_edge,
+        domain_right_edge = domain_right_edge,
+    )
+
+Not all of these arguments are necessary; additionally, yt accepts the
+arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
+``unit_base``.  By default, yt will not utilize a parameter file, and it will
+assume the data is big-endian (``>``).  Optionally, you may specify
+``field_dtypes``, which describes the size of various fields.  For example,
+if you have stored positions as 64-bit floats, you can specify this with:
+
+.. code-block:: python
+
+    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
+                           field_dtypes = {"Coordinates": "d"})
+
+.. _specifying-cosmology-tipsy:
+
+Specifying Tipsy Cosmological Parameters
+++++++++++++++++++++++++++++++++++++++++
+
+Cosmological parameters can be specified to Tipsy to enable computation of
+default units.  The parameters recognized are of this form:
+
+.. code-block:: python
+
+   cosmology_parameters = {'current_redshift': 0.0,
+                           'omega_lambda': 0.728,
+                           'omega_matter': 0.272,
+                           'hubble_constant': 0.702}
+
+If specified, these will be used to set the units.
+
+.. _loading-artio-data:
+
+ARTIO Data
+----------
+
+ARTIO data has a well-specified internal parameter system and has few free
+parameters.  However, for optimization purposes, the parameter that provides
+the most guidance to yt as to how to manage ARTIO data is ``max_range``.  This
+governs the maximum number of space-filling curve cells that will be used in a
+single "chunk" of data read from disk.  For small datasets, setting this number
+very large will enable more data to be loaded into memory at any given time;
+for very large datasets, this parameter can be left alone safely.  By default
+it is set to 1024; it can in principle be set as high as the total number of
+SFC cells.
+
+To load ARTIO data, you can specify a command such as this:
+
+.. code-block:: python
+
+    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
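+
+If you do want to tune the chunk size described above, ``max_range`` can be
+supplied at load time (a sketch, assuming the keyword is forwarded to the
+ARTIO dataset constructor):
+
+.. code-block:: python
+
+    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art", max_range=1024*1024)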
+
+.. _loading-art-data:
+
+ART Data
+--------
+
+ART data enjoys preliminary support and has been supported in the past by
+Christopher Moody.  Please contact the ``yt-dev`` mailing list if you are
+interested in using yt for ART data, or if you are interested in assisting with
+development of yt to work with ART data.
+
+To load an ART dataset, you can use the ``load`` command provided by
+``yt.mods`` and pass it the gas mesh file.  yt will search for and attempt
+to find the complementary dark matter and stellar particle header and data
+files.  However, your simulations may not follow the same naming convention.
+
+So for example, a single snapshot might have a series of files looking like
+this:
+
+.. code-block:: none
+
+   10MpcBox_csf512_a0.300.d    #Gas mesh
+   PMcrda0.300.DAT             #Particle header
+   PMcrs0a0.300.DAT            #Particle data (positions,velocities)
+   stars_a0.300.dat            #Stellar data (metallicities, ages, etc.)
+
+The ART frontend tries to find the associated files matching the above, but
+if that fails you can specify ``file_particle_header``, ``file_particle_data``,
+and ``file_star_data`` in addition to specifying the gas mesh.  You also have
+the option of gridding particles and assigning them onto the meshes.
+This process is in beta, and for the time being it's probably best to leave
+``do_grid_particles=False`` as the default.
+
+To speed up the loading of an ART file, you have a few options. You can turn 
+off the particles entirely by setting ``discover_particles=False``. You can
+also only grid octs up to a certain level, ``limit_level=5``, which is useful
+when debugging by artificially creating a 'smaller' dataset to work with.
+
+Finally, when stellar ages are computed, we 'spread' the ages evenly within a
+smoothing window.  By default this is turned on and set to 10 Myr.  To turn
+this off you can set ``spread=False``, and you can tweak the age smoothing
+window by specifying the window in seconds, e.g. ``spread=1.0e7*365*24*3600``.
+
+.. code-block:: python
+    
+   from yt.mods import *
+
+   pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
+
+.. _loading-moab-data:
+
+MOAB Data
+---------
+
+.. _loading-pyne-data:
+
+PyNE Data
+---------
+
+.. _loading-numpy-array:
+
+Generic Array Data
+------------------
+
+Even if your data is not strictly related to fields commonly used in
+astrophysical codes or your code is not supported yet, you can still feed it to
+``yt`` to use its advanced visualization and analysis facilities. The only
+requirement is that your data can be represented as one or more uniform,
+three-dimensional numpy arrays.  Assuming that you have your data in ``arr``,
+the following code:
+
+.. code-block:: python
+
+   from yt.frontends.stream.api import load_uniform_grid
+
+   data = dict(Density = arr)
+   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
+   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
+
+will create a ``yt``-native parameter file ``pf`` that will treat your array as
+a density field in a cubic domain of 3 Mpc edge size (3 * 3.08e24 cm) and
+simultaneously divide the domain into 12 chunks, so that you can take advantage
+of the underlying parallelism.
+
+Particle fields are detected as one-dimensional fields. The number of
+particles is set by the ``number_of_particles`` key in
+``data``. Particle fields are then added as one-dimensional arrays in
+a similar manner as the three-dimensional grid fields:
+
+.. code-block:: python
+
+   from yt.frontends.stream.api import load_uniform_grid
+
+   data = dict(Density = dens,
+               number_of_particles = 1000000,
+               particle_position_x = posx_arr,
+               particle_position_y = posy_arr,
+               particle_position_z = posz_arr)
+   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
+   pf = load_uniform_grid(data, dens.shape, 3.08e24, bbox=bbox, nprocs=12)
+
+where in this example the particle position fields have been assigned.
+``number_of_particles`` must match the length of the particle arrays.  If no
+particle arrays are supplied, then ``number_of_particles`` is assumed to be
+zero.
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Particles may be difficult to integrate.
+* Data must already reside in memory.
+
+.. _loading-amr-data:
+
+Generic AMR Data
+----------------
+
+It is possible to create a native ``yt`` parameter file from a list of Python
+dictionaries, each of which describes a rectangular patch of data; the
+patches may vary in resolution.
+
+.. code-block:: python
+
+   from yt.frontends.stream.api import load_amr_grids
+
+   grid_data = [
+       dict(left_edge = [0.0, 0.0, 0.0],
+            right_edge = [1.0, 1.0, 1.0],
+            level = 0,
+            dimensions = [32, 32, 32],
+            number_of_particles = 0),
+       dict(left_edge = [0.25, 0.25, 0.25],
+            right_edge = [0.75, 0.75, 0.75],
+            level = 1,
+            dimensions = [32, 32, 32],
+            number_of_particles = 0)
+   ]
+  
+   for g in grid_data:
+       g["density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+  
+   pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+
+Particle fields are supported by adding 1-dimensional arrays and
+setting the ``number_of_particles`` key in each grid's dict:
+
+.. code-block:: python
+
+    for g in grid_data:
+        g["number_of_particles"] = 100000
+        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Some functions may behave oddly, and parallelism will be disappointing or
+  non-existent in most cases.
+* No consistency checks are performed on the index.
+* Data must already reside in memory.
+* Consistency between particle positions and grids is not checked;
+  ``load_amr_grids`` assumes that particle positions associated with one grid are
+  not contained within another grid at a higher level, so this must be
+  ensured by the user prior to loading the grid data.
+
+Generic Particle Data
+---------------------
+

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b doc/source/examining/supported_frontends_data.rst
--- a/doc/source/examining/supported_frontends_data.rst
+++ /dev/null
@@ -1,186 +0,0 @@
-.. _loading-data-from-supported-codes:
-
-Loading Data from Supported Codes
-=================================
-
-This section contains information on how to load data into ``yt`` from
-supported codes, as well as some important caveats about different
-data formats.
-
-.. _loading-enzo-data:
-
-Enzo Data
----------
-
-Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
-dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
-it the parameter file name.  This would be the name of the output file, and it
-contains no extension.  For instance, if you have the following files:
-
-.. code-block:: none
-
-   DD0010/
-   DD0010/data0010
-   DD0010/data0010.index
-   DD0010/data0010.cpu0000
-   DD0010/data0010.cpu0001
-   DD0010/data0010.cpu0002
-   DD0010/data0010.cpu0003
-
-You would feed the ``load`` command the filename ``DD0010/data0010`` as
-mentioned.
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("DD0010/data0010")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Enzo usage
-* Units should be correct, if you utilize standard unit-setting routines.  yt
-  will notify you if it cannot determine the units, although this
-  notification will be passive.
-* 2D and 1D data are supported, but the extraneous dimensions are set to be
-  of length 1.0
-
-.. _loading-orion-data:
-
-Orion Data
-----------
-
-Orion data is fully supported. To load an Orion dataset, you can use the
-``load`` command provided by ``yt.mods`` and supply to it the directory file
-name.  **You must also have the ``inputs`` file in the base directory.** For
-instance, if you were in a directory with the following files:
-
-.. code-block:: none
-
-   inputs
-   pltgmlcs5600/
-   pltgmlcs5600/Header
-   pltgmlcs5600/Level_0
-   pltgmlcs5600/Level_0/Cell_H
-   pltgmlcs5600/Level_1
-   pltgmlcs5600/Level_1/Cell_H
-   pltgmlcs5600/Level_2
-   pltgmlcs5600/Level_2/Cell_H
-   pltgmlcs5600/Level_3
-   pltgmlcs5600/Level_3/Cell_H
-   pltgmlcs5600/Level_4
-   pltgmlcs5600/Level_4/Cell_H
-
-You would feed it the filename ``pltgmlcs5600``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("pltgmlcs5600")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Orion usage
-* Star particles are not supported at the current time
-
-.. _loading-flash-data:
-
-FLASH Data
-----------
-
-FLASH HDF5 data is fully supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
-supply to it the file name of a plot file or checkpoint file.  Particle
-files are not currently directly loadable by themselves, due to the
-fact that they typically lack grid information. For instance, if you were in a directory with
-the following files:
-
-.. code-block:: none
-
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
-
-.. code-block:: python
-
-    from yt.mods import *
-    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
-
-.. rubric:: Caveats
-
-* Please be careful that the units are correctly utilized; yt assumes cgs
-* Velocities and length units will be scaled to comoving coordinates if yt is
-  able to discern you are examining a cosmology simulation; particle and grid
-  positions will not be.
-* Domains may be visualized assuming periodicity.
-
-Athena Data
------------
-
-Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported. 
-
-Loading Athena datasets is slightly different depending on whether
-your dataset came from a serial or a parallel run. If the data came
-from a serial run or you have joined the VTK files together using the
-Athena tool ``join_vtk``, you can load the data like this:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("kh.0010.vtk")
-
-The filename corresponds to the file on SMR level 0, whereas if there
-are multiple levels the corresponding files will be picked up
-automatically, assuming they are laid out in ``lev*`` subdirectories
-under the directory where the base file is located.
-
-For parallel datasets, yt assumes that they are laid out in
-directories named ``id*``, one for each processor number, each with
-``lev*`` subdirectories for additional refinement levels. To load this
-data, call ``load`` with the base file in the ``id0`` directory:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("id0/kh.0010.vtk")
-
-which will pick up all of the files in the different ``id*`` directories for
-the entire dataset. 
-
-yt works in cgs ("Gaussian") units, but Athena data is not
-normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and density to ``load``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("id0/cluster_merger.0250.vtk", 
-          parameters={"LengthUnits":3.0856e24,
-                               "TimeUnits":3.1557e13,"DensityUnits":1.67e-24)
-
-This means that the yt fields (e.g. ``Density``, ``x-velocity``,
-``Bx``) will be in cgs units, but the Athena fields (e.g.,
-``density``, ``velocity_x``, ``cell_centered_B_x``) will be in code
-units. 
-
-.. rubric:: Caveats
-
-* yt primarily works with primitive variables. If the Athena
-  dataset contains conservative variables, the yt primitive fields will be generated from the
-  conserved variables on disk. 
-* Domains may be visualized assuming periodicity.
-* Particle list data is currently unsupported.
-* In some parallel Athena datasets, it is possible for a grid from one
-  refinement level to overlap with more than one grid on the parent
-  level. This may result in unpredictable behavior for some analysis
-  or visualization tasks. 

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b doc/source/loading_data.rst
--- a/doc/source/loading_data.rst
+++ /dev/null
@@ -1,601 +0,0 @@
-.. _loading-data:
-
-Loading Data
-============
-
-This section contains information on how to load data into ``yt``, as well as
-some important caveats about different data formats.
-
-.. _loading-enzo-data:
-
-Enzo Data
----------
-
-Enzo data is fully supported and cared for by Matthew Turk.  To load an Enzo
-dataset, you can use the ``load`` command provided by ``yt.mods`` and supply to
-it the parameter file name.  This would be the name of the output file, and it
-contains no extension.  For instance, if you have the following files:
-
-.. code-block:: none
-
-   DD0010/
-   DD0010/data0010
-   DD0010/data0010.index
-   DD0010/data0010.cpu0000
-   DD0010/data0010.cpu0001
-   DD0010/data0010.cpu0002
-   DD0010/data0010.cpu0003
-
-You would feed the ``load`` command the filename ``DD0010/data0010`` as
-mentioned.
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("DD0010/data0010")
-
-.. rubric:: Caveats
-
-* There are no major caveats for Enzo usage
-* Units should be correct, if you utilize standard unit-setting routines.  yt
-  will notify you if it cannot determine the units, although this
-  notification will be passive.
-* 2D and 1D data are supported, but the extraneous dimensions are set to be
-  of length 1.0 in "code length" which may produce strange results for volume
-  quantities.
-
-.. _loading-orion-data:
-
-Boxlib Data
------------
-
-yt has been tested with Boxlib data generated by Orion, Nyx, Maestro and
-Castro.  Currently it is cared for by a combination of Andrew Myers, Chris
-Malone, and Matthew Turk.
-
-To load a Boxlib dataset, you can use the ``load`` command provided by
-``yt.mods`` and supply to it the directory file name.  **You must also have the
-``inputs`` file in the base directory.**  For instance, if you were in a
-directory with the following files:
-
-.. code-block:: none
-
-   inputs
-   pltgmlcs5600/
-   pltgmlcs5600/Header
-   pltgmlcs5600/Level_0
-   pltgmlcs5600/Level_0/Cell_H
-   pltgmlcs5600/Level_1
-   pltgmlcs5600/Level_1/Cell_H
-   pltgmlcs5600/Level_2
-   pltgmlcs5600/Level_2/Cell_H
-   pltgmlcs5600/Level_3
-   pltgmlcs5600/Level_3/Cell_H
-   pltgmlcs5600/Level_4
-   pltgmlcs5600/Level_4/Cell_H
-
-You would feed it the filename ``pltgmlcs5600``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("pltgmlcs5600")
-
-.. _loading-flash-data:
-
-FLASH Data
-----------
-
-FLASH HDF5 data is *mostly* supported and cared for by John ZuHone.  To load a
-FLASH dataset, you can use the ``load`` command provided by ``yt.mods`` and
-supply to it the file name of a plot file or checkpoint file, but particle
-files are not currently directly loadable by themselves, due to the fact that
-they typically lack grid information. For instance, if you were in a directory
-with the following files:
-
-.. code-block:: none
-
-   cosmoSim_coolhdf5_chk_0026
-
-You would feed it the filename ``cosmoSim_coolhdf5_chk_0026``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("cosmoSim_coolhdf5_chk_0026")
-
-If you have a FLASH particle file that was created at the same time as
-a plotfile or checkpoint file (therefore having particle data
-consistent with the grid structure of the latter), its data may be loaded with the
-``particle_filename`` optional argument:
-
-.. code-block:: python
-
-    from yt.mods import *
-    pf = load("radio_halo_1kpc_hdf5_plt_cnt_0100", particle_filename="radio_halo_1kpc_hdf5_part_0100")
-
-.. rubric:: Caveats
-
-* Please be careful that the units are correctly utilized; yt assumes cgs.
-
-.. _loading-ramses-data:
-
-RAMSES Data
------------
-
-In yt-3.0, RAMSES data is fully supported.  If you are interested in taking a
-development or stewardship role, please contact the yt-dev mailing list.  To
-load a RAMSES dataset, you can use the ``load`` command provided by ``yt.mods``
-and supply to it the ``info*.txt`` filename.  For instance, if you were in a
-directory with the following files:
-
-.. code-block:: none
-
-   output_00007
-   output_00007/amr_00007.out00001
-   output_00007/grav_00007.out00001
-   output_00007/hydro_00007.out00001
-   output_00007/info_00007.txt
-   output_00007/part_00007.out00001
-
-You would feed it the filename ``output_00007/info_00007.txt``:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("output_00007/info_00007.txt")
-
-yt will attempt to guess the fields in the file.  You may also specify a list
-of fields by supplying the ``fields`` keyword in your call to ``load``.
-
-.. _loading-gadget-data:
-
-Gadget Data
------------
-
-yt has support for reading Gadget data in both raw binary and HDF5 formats.  It
-is able to access the particles as it would any other particle dataset, and it
-can apply smoothing kernels to the data to produce both quantitative analysis
-and visualization.
-
-Gadget data in HDF5 format can be loaded with the ``load`` command:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = load("snapshot_061.hdf5")
-
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
-
-.. code-block:: python
-
-   from yt.mods import *
-   pf = GadgetDataset("snapshot_061")
-
-.. _particle-bbox:
-
-Units and Bounding Boxes
-++++++++++++++++++++++++
-
-There are two additional pieces of information that may be needed.  If your
-simulation is cosmological, yt can often guess the bounding box and the units
-of the simulation.  However, for isolated simulations and for cosmological
-simulations with non-standard units, these must be supplied.  For example, if
-a length unit of 1.0 corresponds to a kiloparsec, you can supply this in the
-constructor.  yt can accept units such as ``Mpc``, ``kpc``, ``cm``, ``Mpccm/h``
-and so on.  In particular, note that ``Mpc/h`` and ``Mpccm/h`` (``cm`` for
-comoving here) are usable unit definitions.
-
-yt will attempt to use units for ``mass``, ``length`` and ``time`` as supplied
-in the argument ``unit_base``.  The ``bounding_box`` argument is a list of
-two-item tuples or lists that describe the left and right extents of the
-particles.
-
-.. code-block:: python
-
-   pf = GadgetDataset("snap_004",
-           unit_base = {'length': ('kpc', 1.0)},
-           bounding_box = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]])
-
-.. _particle-indexing-criteria:
-
-Indexing Criteria
-+++++++++++++++++
-
-yt generates a global mesh index via octree that governs the resolution of
-volume elements.  This is governed by two parameters, ``n_ref`` and
-``over_refine_factor``.  They are weak proxies for each other.  The first,
-``n_ref``, governs how many particles in an oct results in that oct being
-refined into eight child octs.  Lower values mean higher resolution; the
-default is 64.  The secon parameter, ``over_refine_factor``, governs how many
-cells are in a given oct; the default value of 1 corresponds to 8 cells.
-The number of cells in an oct is defined by the expression
-``2**(3*over_refine_factor)``.
-
-It's recommended that if you want higher-resolution, try reducing the value of
-``n_ref`` to 32 or 16.
-
-.. _gadget-field-spec:
-
-Field Specifications
-++++++++++++++++++++
-
-Binary Gadget outputs often have additional fields or particle types that are
-non-standard from the default Gadget distribution format.  These can be
-specified in the call to ``GadgetDataset`` by either supplying one of the
-sets of field specifications as a string or by supplying a field specification
-itself.  As an example, yt has built-in definitions for ``default`` (the
-default) and ``agora_unlv``.  Field specifications must be tuples, and must be
-of this format:
-
-.. code-block:: python
-
-   default = ( "Coordinates",
-               "Velocities",
-               "ParticleIDs",
-               "Mass",
-               ("InternalEnergy", "Gas"),
-               ("Density", "Gas"),
-               ("SmoothingLength", "Gas"),
-   )
-
-This is the default specification used by the Gadget frontend.  It means that
-the fields are, in order, Coordinates, Velocities, ParticleIDs, Mass, and the
-fields InternalEnergy, Density and SmoothingLength *only* for Gas particles.
-So for example, if you have defined a Metallicity field for the particle type
-Halo, which comes right after ParticleIDs in the file, you could define it like
-this:
-
-.. code-block:: python
-
-   my_field_def = ( "Coordinates",
-               "Velocities",
-               "ParticleIDs",
-               ("Metallicity", "Halo"),
-               "Mass",
-               ("InternalEnergy", "Gas"),
-               ("Density", "Gas"),
-               ("SmoothingLength", "Gas"),
-   )
-
-To save time, you can utilize the plugins file for yt and use it to add items
-to the dictionary where these definitions are stored.  You could do this like
-so:
-
-.. code-block:: python
-
-   from yt.frontends.sph.definitions import gadget_field_specs
-   gadget_field_specs["my_field_def"] = my_field_def
-
-Please also feel free to issue a pull request with any new field
-specifications, as we're happy to include them in the main distribution!
-
-.. _gadget-ptype-spec:
-
-Particle Type Definitions
-+++++++++++++++++++++++++
-
-In some cases, research groups add new particle types or re-order them.  You
-can supply alternate particle types by using the keyword ``ptype_spec`` to the
-``GadgetDataset`` call.  The default for Gadget binary data is:
-
-.. code-block:: python
-
-    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
-
-You can specify alternate names, but note that this may cause problems with the
-field specification if none of the names match old names.
-
-.. _gadget-header-spec:
-
-Header Specification
-++++++++++++++++++++
-
-If you have modified the header in your Gadget binary file, you can specify an
-alternate header specification with the keyword ``header_spec``.  This can
-either be a list of strings corresponding to individual header types known to
-yt, or it can be a combination of strings and header specifications.  The
-default header specification (found in ``yt/frontends/sph/definitions.py``) is:
-
-.. code-block:: python
-   
-    default      = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i'))
-
-These items will all be accessible inside the object ``pf.parameters``, which
-is a dictionary.  You can add combinations of new items, specified in the same
-way, or alternately other types of headers.  The other string keys defined are
-``pad32``, ``pad64``, ``pad128``, and ``pad256`` each of which corresponds to
-an empty padding in bytes.  For example, if you have an additional 256 bytes of
-padding at the end, you can specify this with:
-
-.. code-block:: python
-
-   header_spec = ["default", "pad256"]
-
-This can then be supplied to the constructor.  Note that you can also do this
-manually, for instance with:
-
-
-.. code-block:: python
-
-   header_spec = ["default", (('some_value', 8, 'd'),
-                              ('another_value', 1, 'i'))]
-
-The letters correspond to data types from the Python struct module.  Please
-feel free to submit alternate header types to the main yt repository.
-
-.. _specifying-gadget-units:
-
-Specifying Units
-++++++++++++++++
-
-If you are running a cosmology simulation, yt will be able to guess the units
-with some reliability.  However, if you are not and you do not specify a
-parameter file, yt will not be able to and will use the defaults of length
-being 1.0 Mpc/h (comoving), velocity being in cm/s, and mass being in 10^10
-Msun/h.  You can specify alternate units by supplying the ``unit_base`` keyword
-argument of this form:
-
-.. code-block:: python
-
-   unit_base = {'length': (1.0, 'cm'), 'mass': (1.0, 'g'), 'time': (1.0, 's')}
-
-yt will utilize length, mass and time to set up all other units.
-
-.. _loading-tipsy-data:
-
-Tipsy Data
-----------
-
-yt also supports loading Tipsy data.  Many of its characteristics are similar
-to how Gadget data is loaded; specifically, it shares its definition of
-indexing and mesh-identification with that described in
-:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
-has not yet implemented header specifications, field specifications, or
-particle type specifications.  *These are all excellent projects for new
-contributors!*
-
-Tipsy data cannot be automatically detected.  You can load it with a command
-similar to the following:
-
-.. code-block:: python
-
-    ds = TipsyDataset('test.00169',
-        parameter_file='test.param',
-        endian = '<',
-        domain_left_edge = domain_left_edge,
-        domain_right_edge = domain_right_edge,
-    )
-
-Not all of these arguments are necessary; additionally, yt accepts the
-arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
-``unit_base``.  By default, yt will not utilize a parameter file, and by
-default it will assume the data is "big" endian (`>`).  Optionally, you may
-specify ``field_dtypes``, which describe the size of various fields.  For
-example, if you have stored positions as 64-bit floats, you can specify this
-with:
-
-.. code-block:: python
-
-    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
-                           field_dtypes = {"Coordinates": "d"})
-
-.. _specifying-cosmology-tipsy:
-
-Specifying Tipsy Cosmological Parameters
-++++++++++++++++++++++++++++++++++++++++
-
-Cosmological parameters can be specified to Tipsy to enable computation of
-default units.  The parameters recognized are of this form:
-
-.. code-block:: python
-
-   cosmology_parameters = {'current_redshift': 0.0,
-                           'omega_lambda': 0.728,
-                           'omega_matter': 0.272,
-                           'hubble_constant': 0.702}
-
-These will be used set the units, if they are specified.
-
-.. _loading-artio-data:
-
-ARTIO Data
-----------
-
-ARTIO data has a well-specified internal parameter system and has few free
-parameters.  However, for optimization purposes, the parameter that provides
-the most guidance to yt as to how to manage ARTIO data is ``max_range``.  This
-governs the maximum number of space-filling curve cells that will be used in a
-single "chunk" of data read from disk.  For small datasets, setting this number
-very large will enable more data to be loaded into memory at any given time;
-for very large datasets, this parameter can be left alone safely.  By default
-it is set to 1024; it can in principle be set as high as the total number of
-SFC cells.
-
-To load ARTIO data, you can specify a command such as this:
-
-.. code-block:: python
-
-    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
-
-.. _loading-art-data:
-
-ART Data
---------
-
-ART data enjoys preliminary support and has been supported in the past by
-Christopher Moody.  Please contact the ``yt-dev`` mailing list if you are
-interested in using yt for ART data, or if you are interested in assisting with
-development of yt to work with ART data.
-
-To load an ART dataset you can use the ``load`` command provided by 
-``yt.mods`` and passing the gas mesh file. It will search for and attempt 
-to find the complementary dark matter and stellar particle header and data 
-files. However, your simulations may not follow the same naming convention.
-
-So for example, a single snapshot might have a series of files looking like
-this:
-
-.. code-block:: none
-
-   10MpcBox_csf512_a0.300.d    #Gas mesh
-   PMcrda0.300.DAT             #Particle header
-   PMcrs0a0.300.DAT            #Particle data (positions,velocities)
-   stars_a0.300.dat            #Stellar data (metallicities, ages, etc.)
-
-The ART frontend tries to find the associated files matching the above, but
-if that fails you can specify ``file_particle_data``,``file_particle_data``,
-``file_star_data`` in addition to the specifying the gas mesh. You also have 
-the option of gridding particles, and assigning them onto the meshes.
-This process is in beta, and for the time being it's probably  best to leave
-``do_grid_particles=False`` as the default.
-
-To speed up the loading of an ART file, you have a few options. You can turn 
-off the particles entirely by setting ``discover_particles=False``. You can
-also only grid octs up to a certain level, ``limit_level=5``, which is useful
-when debugging by artificially creating a 'smaller' dataset to work with.
-
-Finally, when stellar ages are computed we 'spread' the ages evenly within a
-smoothing window. By default this is turned on and set to 10Myr. To turn this 
-off you can set ``spread=False``, and you can tweak the age smoothing window
-by specifying the window in seconds, ``spread=1.0e7*265*24*3600``. 
-
-.. code-block:: python
-    
-   from yt.mods import *
-
-   pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
-
-.. _loading-moab-data:
-
-MOAB Data
----------
-
-.. _loading-pyne-data:
-
-PyNE Data
----------
-
-.. _loading-numpy-array:
-
-Generic Array Data
-------------------
-
-Even if your data is not strictly related to fields commonly used in
-astrophysical codes or your code is not supported yet, you can still feed it to
-``yt`` to use its advanced visualization and analysis facilities. The only
-requirement is that your data can be represented as one or more uniform, three
-dimensional numpy arrays. Assuming that you have your data in ``arr``,
-the following code:
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_uniform_grid
-
-   data = dict(Density = arr)
-   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
-   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
-
-will create ``yt``-native parameter file ``pf`` that will treat your array as
-density field in cubic domain of 3 Mpc edge size (3 * 3.08e24 cm) and
-simultaneously divide the domain into 12 chunks, so that you can take advantage
-of the underlying parallelism. 
-
-Particle fields are detected as one-dimensional fields. The number of
-particles is set by the ``number_of_particles`` key in
-``data``. Particle fields are then added as one-dimensional arrays in
-a similar manner as the three-dimensional grid fields:
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_uniform_grid
-
-   data = dict(Density = dens, 
-               number_of_particles = 1000000,
-               particle_position_x = posx_arr, 
-	       particle_position_y = posy_arr,
-	       particle_position_z = posz_arr)
-   bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
-   pf = load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
-
-where in this exampe the particle position fields have been assigned. ``number_of_particles`` must be the same size as the particle
-arrays. If no particle arrays are supplied then ``number_of_particles`` is assumed to be zero. 
-
-.. rubric:: Caveats
-
-* Units will be incorrect unless the data has already been converted to cgs.
-* Particles may be difficult to integrate.
-* Data must already reside in memory.
-
-.. _loading-amr-data:
-
-Generic AMR Data
-----------------
-
-It is possible to create native ``yt`` parameter file from Python's dictionary
-that describes set of rectangular patches of data of possibly varying
-resolution. 
-
-.. code-block:: python
-
-   from yt.frontends.stream.api import load_amr_grids
-
-   grid_data = [
-       dict(left_edge = [0.0, 0.0, 0.0],
-            right_edge = [1.0, 1.0, 1.],
-            level = 0,
-            dimensions = [32, 32, 32],
-            number_of_particles = 0)
-       dict(left_edge = [0.25, 0.25, 0.25],
-            right_edge = [0.75, 0.75, 0.75],
-            level = 1,
-            dimensions = [32, 32, 32],
-            number_of_particles = 0)
-   ]
-  
-   for g in grid_data:
-       g["density"] = np.random.random(g["dimensions"]) * 2**g["level"]
-  
-   pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
-
-Particle fields are supported by adding 1-dimensional arrays and
-setting the ``number_of_particles`` key to each ``grid``'s dict:
-
-.. code-block:: python
-
-    for g in grid_data:
-        g["number_of_particles"] = 100000
-        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
-
-.. rubric:: Caveats
-
-* Units will be incorrect unless the data has already been converted to cgs.
-* Some functions may behave oddly, and parallelism will be disappointing or
-  non-existent in most cases.
-* No consistency checks are performed on the index
-* Data must already reside in memory.
-* Consistency between particle positions and grids is not checked;
-  ``load_amr_grids`` assumes that particle positions associated with one grid are
-  not bounded within another grid at a higher level, so this must be
-  ensured by the user prior to loading the grid data. 
-
-Generic Particle Data
----------------------
-

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -0,0 +1,244 @@
+import numpy as np
+import os, sys
+cimport numpy as np
+cimport cython
+#from cpython.mem cimport PyMem_Malloc
+from libc.stdlib cimport malloc, free
+import sys
+
+
+
+# Import the relevant rockstar data types: particle, fof, and halo
+
+cdef import from "particle.h":
+	struct particle:
+		np.int64_t id
+		float pos[6]
+
+cdef import from "fof.h":
+	struct fof:
+		np.int64_t num_p
+		particle *particles
+
+cdef import from "halo.h":
+	struct halo:
+		np.int64_t id
+		float pos[6], corevel[3], bulkvel[3]
+		float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
+		float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+		float bullock_spin, kin_to_pot
+		np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+		float min_pos_err, min_vel_err, min_bulkvel_err
+
+# For finding subhalos, import the finder function and the global
+# variables rockstar uses to store the results
+
+cdef import from "groupies.h":
+	void find_subs(fof *f) 
+	halo *halos
+	np.int64_t num_halos
+	void calc_mass_definition()
+
+# For outputting halos, rockstar style
+
+cdef import from "meta_io.h":
+	void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) 
+
+# For setting up the configuration of rockstar
+
+cdef import from "config.h":
+	void setup_config()
+
+cdef import from "config_vars.h":
+	# Rockstar cleverly puts all of the config variables inside a templated
+	# definition of their variables.
+	char *FILE_FORMAT
+	np.float64_t PARTICLE_MASS
+
+	char *MASS_DEFINITION
+	np.int64_t MIN_HALO_OUTPUT_SIZE
+	np.float64_t FORCE_RES
+
+	np.float64_t SCALE_NOW
+	np.float64_t h0
+	np.float64_t Ol
+	np.float64_t Om
+
+	np.int64_t GADGET_ID_BYTES
+	np.float64_t GADGET_MASS_CONVERSION
+	np.float64_t GADGET_LENGTH_CONVERSION
+	np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+	np.int64_t RESCALE_PARTICLE_MASS
+
+	np.int64_t PARALLEL_IO
+	char *PARALLEL_IO_SERVER_ADDRESS
+	char *PARALLEL_IO_SERVER_PORT
+	np.int64_t PARALLEL_IO_WRITER_PORT
+	char *PARALLEL_IO_SERVER_INTERFACE
+	char *RUN_ON_SUCCESS
+
+	char *INBASE
+	char *FILENAME
+	np.int64_t STARTING_SNAP
+	np.int64_t NUM_SNAPS
+	np.int64_t NUM_BLOCKS
+	np.int64_t NUM_READERS
+	np.int64_t PRELOAD_PARTICLES
+	char *SNAPSHOT_NAMES
+	char *LIGHTCONE_ALT_SNAPS
+	char *BLOCK_NAMES
+
+	char *OUTBASE
+	np.float64_t OVERLAP_LENGTH
+	np.int64_t NUM_WRITERS
+	np.int64_t FORK_READERS_FROM_WRITERS
+	np.int64_t FORK_PROCESSORS_PER_MACHINE
+
+	char *OUTPUT_FORMAT
+	np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+	np.int64_t FULL_PARTICLE_CHUNKS
+	char *BGC2_SNAPNAMES
+
+	np.int64_t BOUND_PROPS
+	np.int64_t BOUND_OUT_TO_HALO_EDGE
+	np.int64_t DO_MERGER_TREE_ONLY
+	np.int64_t IGNORE_PARTICLE_IDS
+	np.float64_t TRIM_OVERLAP
+	np.float64_t ROUND_AFTER_TRIM
+	np.int64_t LIGHTCONE
+	np.int64_t PERIODIC
+
+	np.float64_t LIGHTCONE_ORIGIN[3]
+	np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+
+	np.float64_t LIMIT_CENTER[3]
+	np.float64_t LIMIT_RADIUS
+
+	np.int64_t SWAP_ENDIANNESS
+	np.int64_t GADGET_VARIANT
+
+	np.float64_t FOF_FRACTION
+	np.float64_t FOF_LINKING_LENGTH
+	np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+	np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+	np.int64_t TEMPORAL_HALO_FINDING
+	np.int64_t MIN_HALO_PARTICLES
+	np.float64_t UNBOUND_THRESHOLD
+	np.int64_t ALT_NFW_METRIC
+
+	np.int64_t TOTAL_PARTICLES
+	np.float64_t BOX_SIZE
+	np.int64_t OUTPUT_HMAD
+	np.int64_t OUTPUT_PARTICLES
+	np.int64_t OUTPUT_LEVELS
+	np.float64_t DUMP_PARTICLES[3]
+
+	np.float64_t AVG_PARTICLE_SPACING
+	np.int64_t SINGLE_SNAP
+
+
+
+cdef class RockstarGroupiesInterface:
+	
+	cdef public object pf
+	cdef public object fof
+
+	# For future use/consistency
+	def __cinit__(self,pf):
+		self.pf = pf
+
+	def setup_rockstar(self,
+						particle_mass,
+						int periodic = 1, force_res=None,
+						int min_halo_size = 25, outbase = "None",
+						callbacks = None):
+		global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+		global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+		global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+		global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+		global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+		
+
+		if force_res is not None:
+			FORCE_RES=np.float64(force_res)
+
+		OVERLAP_LENGTH = 0.0
+		
+		FILENAME = "inline.<block>"
+		FILE_FORMAT = "GENERIC"
+		OUTPUT_FORMAT = "ASCII"
+		MIN_HALO_OUTPUT_SIZE=min_halo_size
+		
+		pf = self.pf
+
+		h0 = pf.hubble_constant
+		Ol = pf.omega_lambda
+		Om = pf.omega_matter
+		
+		SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+		
+		if not outbase == 'None'.decode('UTF-8'):
+			# Output directory: since we can't change the output filenames,
+			# the workaround is to make a new directory.
+			OUTBASE = outbase
+
+
+		PARTICLE_MASS = particle_mass.in_units('Msun/h')
+		PERIODIC = periodic
+		BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
+
+		# Set up the configuration options
+		setup_config()
+
+		# Needs to be called so rockstar can use the particle mass parameter
+		# to calculate virial quantities properly
+		calc_mass_definition()
+
+
+
+	def make_rockstar_fof(self,fof_ids, pos, vel):
+
+		# Turn positions and velocities into units we want
+		pos = pos.in_units('Mpccm/h')
+		vel = vel.in_units('km/s')
+
+		# Define fof object
+		cdef fof fof_obj
+
+		# Find number of particles
+		cdef np.int64_t num_particles = len(fof_ids)
+
+		# Allocate space for correct number of particles
+		cdef particle* particles = <particle*> malloc(num_particles * sizeof(particle))
+
+		# Fill in array of particles with particle that fof identified
+		# This is possibly the slowest way to code this, but for now
+		# I just want it to work
+		for i,id in enumerate(fof_ids):
+			particles[i].id = id
+
+			# fill in locations & velocities
+			for j in range(3):
+				particles[i].pos[j] = pos[id][j]
+				particles[i].pos[j+3] = vel[id][j]
+
+
+		# Assign pointer to particles into FOF object 
+		fof_obj.particles = particles
+
+		# Assign number of particles into FOF object
+		fof_obj.num_p = num_particles
+
+		# Make a pointer to the fof object
+		cdef fof* fof_pointer = &fof_obj
+
+		# Find the subhalos by passing a pointer to the fof object to rockstar
+		find_subs(fof_pointer)
+
+		# Output the halos, rockstar style
+		output_halos(0, 0, 0, NULL) 
+
+		free(particles)
+
+
+
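
For orientation, here is a hedged sketch of how this new interface might be
driven from Python. The dataset ``pf``, the FOF index array ``fof_ids``, and
the position/velocity arrays ``pos``/``vel`` are assumed inputs, and
``pf.quan`` is assumed to build a quantity in the dataset's unit registry;
none of these are part of this commit:

    # Hypothetical driver; pf, fof_ids, pos, and vel are assumed inputs.
    from yt.analysis_modules.halo_finding.rockstar.rockstar_groupies import \
        RockstarGroupiesInterface

    rgi = RockstarGroupiesInterface(pf)
    # particle_mass must carry units convertible to 'Msun/h'
    rgi.setup_rockstar(pf.quan(1.0e8, 'Msun/h'), min_halo_size=25)
    # fof_ids indexes into pos/vel; make_rockstar_fof converts units itself
    rgi.make_rockstar_fof(fof_ids, pos, vel)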

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -472,7 +472,7 @@
             print "Often this can be fixed by changing the 'endian' parameter."
             print "This defaults to '>' but may in fact be '<'."
             raise RuntimeError
-        if self.parameters.get('bComove', True):
+        if self.parameters.get('bComove', False):
             self.cosmological_simulation = 1
             cosm = self._cosmology_parameters or {}
             dcosm = dict(current_redshift=0.0,
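
The flipped default above makes cosmology opt-in for tipsy data; absent an
explicit 'bComove' parameter, a run is now treated as non-cosmological.
Schematically:

    # Schematic of the new behavior, not the frontend's actual code path
    parameters = {}                                     # no 'bComove' key
    is_cosmological = parameters.get('bComove', False)  # False after this change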

diff -r 219051a16966a33457f7fa225bae37dc6ff30af6 -r 031ac7fd776e22564a3f7595648c8d9de4142e3b yt/utilities/lib/origami.pyx
--- /dev/null
+++ b/yt/utilities/lib/origami.pyx
@@ -0,0 +1,52 @@
+"""
+This calls the ORIGAMI routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+from libc.stdlib cimport malloc, free
+
+cdef extern from "origami_tags.h":
+    int compute_tags(int ng, double boxsize, double **r, int npart,
+                     unsigned char *m)
+
+cdef int printed_citation = 0
+
+def run_origami(np.ndarray[np.float64_t, ndim=1] pos_x,
+                np.ndarray[np.float64_t, ndim=1] pos_y,
+                np.ndarray[np.float64_t, ndim=1] pos_z,
+                double boxsize):
+    # We assume these have been passed in in the correct order and
+    # C-contiguous.
+    global printed_citation
+    if printed_citation == 0:
+        print "ORIGAMI was developed by Bridget Falck and Mark Neyrinck."
+        print "Please cite Falck, Neyrinck, & Szalay 2012, ApJ, 754, 2, 125."
+        printed_citation = 1
+    cdef int npart = pos_x.size
+    if npart == 1:
+        return np.zeros(1, dtype="uint8")
+    assert(sizeof(unsigned char) == sizeof(np.uint8_t))
+    assert(sizeof(double) == sizeof(np.float64_t))
+    cdef int ng = np.round(npart**(1./3))
+    assert(ng**3 == npart)
+    cdef double **r = <double **> malloc(sizeof(double *) * 3)
+    r[0] = <double *> pos_x.data
+    r[1] = <double *> pos_y.data
+    r[2] = <double *> pos_z.data
+    cdef np.ndarray[np.uint8_t, ndim=1] tags = np.zeros(npart, dtype="uint8")
+    cdef void *m = <void*> tags.data
+    compute_tags(ng, boxsize, r, npart, <unsigned char*> m)
+    free(r)
+    return tags
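
A small usage sketch for the wrapper above (illustrative only; assumes 32**3
randomly placed particles on a unit box):

    import numpy as np
    from yt.utilities.lib.origami import run_origami

    # run_origami wants flat, C-contiguous float64 arrays whose length is
    # a perfect cube (here 32**3), plus the box size.
    ng = 32
    x, y, z = (np.random.random(ng**3) for _ in range(3))
    tags = run_origami(x, y, z, 1.0)  # one uint8 ORIGAMI morphology tag per particle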

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b0a2e2d086e0/
Changeset:   b0a2e2d086e0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 05:07:05
Summary:     Merging from experimental bookmark.
Affected #:  38 files

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ b/doc/source/analyzing/creating_derived_fields.rst
@@ -295,8 +295,6 @@
      (*Advanced*) Should this field appear in the dropdown box in Reason?
    ``not_in_all``
      (*Advanced*) If this is *True*, the field may not be in all the grids.
-   ``projection_conversion``
-     (*Advanced*) Which unit should we multiply by in a projection?
 
 How Do Units Work?
 ------------------

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -40,8 +40,7 @@
 
    add_enzo_field("Cooling_Time", units=r"\rm{s}",
                   function=NullFunc,
-                  validators=ValidateDataField("Cooling_Time"),
-                  projection_conversion="1")
+                  validators=ValidateDataField("Cooling_Time"))
 
 Note that we used the ``NullFunc`` function here.  To add a derived field,
 which is not expected to necessarily exist on disk, use the standard

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -39,26 +39,35 @@
     pass
 
 vlist = "xyz"
+def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
+    def _t_squared(field, data):
+        return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
+    registry.add_field(("gas", "t_squared"),
+                       function = _t_squared,
+                       units="g*keV**2/cm**3")
+    def _beta_perp_squared(field, data):
+        return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
+    registry.add_field(("gas","beta_perp_squared"), 
+                       function = _beta_perp_squared,
+                       units="g/cm**3")
 
- at derived_field(name=("gas","t_squared"), units="g*keV**2/cm**3")
-def _t_squared(field, data):
-    return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
+    def _beta_par_squared(field, data):
+        return data["gas","beta_par"]**2/data["gas","density"]
+    registry.add_field(("gas","beta_par_squared"),
+                       function = _beta_par_squared,
+                       units="g/cm**3")
 
- at derived_field(name=("gas","beta_perp_squared"), units="g/cm**3")
-def _beta_perp_squared(field, data):
-    return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
+    def _t_beta_par(field, data):
+        return data["gas","kT"]*data["gas","beta_par"]
+    registry.add_field(("gas","t_beta_par"),
+                       function = _t_beta_par,
+                       units="keV*g/cm**3")
 
- at derived_field(name=("gas","beta_par_squared"), units="g/cm**3")
-def _beta_par_squared(field, data):
-    return data["gas","beta_par"]**2/data["gas","density"]
-
- at derived_field(name=("gas","t_beta_par"), units="keV*g/cm**3")
-def _t_beta_par(field, data):
-    return data["gas","kT"]*data["gas","beta_par"]
-
- at derived_field(name=("gas","t_sz"), units="keV*g/cm**3")
-def _t_sz(field, data):
-    return data["gas","density"]*data["gas","kT"]
+    def _t_sz(field, data):
+        return data["gas","density"]*data["gas","kT"]
+    registry.add_field(("gas","t_sz"),
+                       function = _t_sz,
+                       units="keV*g/cm**3")
 
 def generate_beta_par(L):
     def _beta_par(field, data):
@@ -90,6 +99,7 @@
     def __init__(self, pf, freqs, mue=1.143, high_order=False):
 
         self.pf = pf
+        pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields)
         self.num_freqs = len(freqs)
         self.high_order = high_order
         self.freqs = pf.arr(freqs, "GHz")
@@ -138,8 +148,9 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
-        proj = self.pf.proj("density", axis, center=ctr, data_source=source)
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -202,7 +213,7 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
         dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
         Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens
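
For context, the plugin-based field setup above runs when an SZProjection is
constructed. A hedged usage sketch, assuming the module's usual on-axis entry
point and placeholder dataset, center, and frequencies:

    # Hypothetical usage; setup_sunyaev_zeldovich_fields is loaded in __init__
    szprj = SZProjection(pf, freqs=[90., 180., 240.])
    szprj.on_axis("z", center="max", width=(1.0, "Mpc"), nx=400)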

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -517,7 +517,7 @@
 
     # Now all the object related stuff
     def all_data(self, find_max=False):
-        if find_max: c = self.find_max("Density")[1]
+        if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
             self.domain_left_edge, self.domain_right_edge)

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -78,14 +78,11 @@
        Used for baryon fields from the data that are not in all the grids
     display_name : str
        A name used in the plots
-    projection_conversion : unit
-       which unit should we multiply by in a projection?
     """
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None,
-                 projection_conversion="cm"):
+                 not_in_all=False, display_name=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -124,7 +121,6 @@
         dd['display_field'] = True
         dd['not_in_all'] = self.not_in_all
         dd['display_name'] = self.display_name
-        dd['projection_conversion'] = self.projection_conversion
         return dd
 
     def get_units(self):

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -128,7 +128,9 @@
                 return self[item]
         elif finfo is not None and finfo.particle_type:
             if "particle_position" in (item, item[1]) or \
-               "particle_velocity" in (item, item[1]):
+               "particle_velocity" in (item, item[1]) or \
+               "Velocity" in (item, item[1]) or \
+               "Coordinates" in (item, item[1]):
                 # A vector
                 self[item] = \
                   YTArray(np.ones((self.NumberOfParticles, 3)),

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -55,6 +55,7 @@
     known_particle_fields = ()
 
     def __init__(self, pf, field_list, slice_info = None):
+        self._show_field_errors = []
         self.pf = pf
         # Now we start setting things up.
         self.field_list = field_list
@@ -171,8 +172,11 @@
         self.find_dependencies(loaded)
 
     def load_plugin(self, plugin_name, ftype = "gas", skip_check = False):
+        if callable(plugin_name):
+            f = plugin_name
+        else:
+            f = field_plugins[plugin_name]
         orig = set(self.items())
-        f = field_plugins[plugin_name]
         f(self, ftype, slice_info = self.slice_info)
         loaded = [n for n, v in set(self.items()).difference(orig)]
         return loaded
@@ -309,6 +313,8 @@
             try:
                 fd = fi.get_dependencies(pf = self.pf)
             except Exception as e:
+                if field in self._show_field_errors:
+                    raise
                 if type(e) != YTFieldNotFound:
                     mylog.debug("Raises %s during field %s detection.",
                                 str(type(e)), field)
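
The load_plugin change above accepts a bare callable in place of a registered
plugin name, which is how the Sunyaev-Zeldovich module now injects its fields.
A minimal sketch with a hypothetical field:

    def setup_my_fields(registry, ftype="gas", slice_info=None):
        def _rho_sq(field, data):
            return data[ftype, "density"]**2
        registry.add_field((ftype, "density_squared"),
                           function=_rho_sq, units="g**2/cm**6")

    pf.field_info.load_plugin(setup_my_fields)  # a callable, not a name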

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -78,7 +78,6 @@
 
     registry.add_field(("index", "zeros"), function=_zeros,
               units = "",
-              projection_conversion="unitary",
               display_field=False)
 
     def _ones(field, data):
@@ -88,7 +87,6 @@
         return data.apply_units(arr, field.units)
 
     registry.add_field(("index", "ones"), function=_ones,
-              projection_conversion="unitary",
               units = "",
               display_field=False)
 

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -18,7 +18,8 @@
 from .field_plugin_registry import \
     register_field_plugin
 
-from .field_info_container import FieldInfoContainer
+from .field_info_container import \
+    FieldInfoContainer
 
 # Empty FieldInfoContainer
 local_fields = FieldInfoContainer(None, [], None)
@@ -31,4 +32,6 @@
     # info container, and since they are not mutable in any real way, we are
     # fine.
     # Note that we actually don't care about the ftype here.
+    for f in local_fields:
+        registry._show_field_errors.append(f)
     registry.update(local_fields)
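
With the change above, locally defined fields re-raise their exceptions during
field detection instead of being silently dropped. A sketch, assuming the
module's usual add_field alias for the local container:

    from yt.fields.local_fields import add_field

    def _broken(field, data):
        raise ValueError("broken on purpose")
    # Because local fields land in _show_field_errors, this error now
    # surfaces during detection rather than being swallowed.
    add_field("broken_field", function=_broken, units="")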

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -80,8 +80,7 @@
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype,
-             projection_conversion = '1')
+             display_name = "\\mathrm{%s Count}" % ptype)
 
     def particle_mass(field, data):
         pos = data[ptype, coord_name]

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -53,6 +53,12 @@
              / amu_cgs
     return _number_density
 
+def _create_density_func(ftype, species):
+    def _density(field, data):
+        return data[ftype, "%s_fraction" % species] \
+            * data[ftype,'density']
+    return _density
+
 def add_species_field_by_density(registry, ftype, species):
     """
     This takes a field registry, a fluid type, and a species name and then
@@ -68,3 +74,19 @@
     registry.add_field((ftype, "%s_number_density" % species),
                         function = _create_number_density_func(ftype, species),
                         units = "cm**-3")
+
+def add_species_field_by_fraction(registry, ftype, species):
+    """
+    This takes a field registry, a fluid type, and a species name and then
+    adds the derived species fields (density, mass, number density) based on
+    it.  This assumes that the field "SPECIES_fraction" already exists and
+    refers to a mass fraction.
+    """
+    registry.add_field((ftype, "%s_density" % species), 
+                        function = _create_density_func(ftype, species),
+                        units = "g/cm**3")
+    registry.add_field((ftype, "%s_mass" % species),
+                        function = _create_mass_func(ftype, species),
+                        units = "g")
+    registry.add_field((ftype, "%s_number_density" % species),
+                        function = _create_number_density_func(ftype, species),
+                        units = "cm**-3")
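
For illustration, a frontend that carries a mass-fraction field could wire up
the derived species fields with a single call; the registry and field names
here are hypothetical:

    # Sketch: given an existing ("gas", "h2_fraction") field, this registers
    # h2_density (fraction * density), h2_mass, and h2_number_density.
    add_species_field_by_fraction(registry, "gas", "h2")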

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -20,7 +20,7 @@
     'artio',
     'athena',
     'boxlib',
-    #'chombo',
+    'chombo',
     'enzo',
     'fits',
     'flash',

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 import os.path
-import glob
 import stat
 import weakref
 import cStringIO
@@ -203,14 +202,17 @@
         particle header, star files, etc.
         """
         base_prefix, base_suffix = filename_pattern['amr']
+        aexpstr = 'a'+file_amr.rsplit('a',1)[1].replace(base_suffix,'')
         possibles = glob.glob(os.path.dirname(file_amr)+"/*")
         for filetype, (prefix, suffix) in filename_pattern.iteritems():
             # if this attribute is already set skip it
             if getattr(self, "_file_"+filetype, None) is not None:
                 continue
-            stripped = file_amr.replace(base_prefix, prefix)
-            stripped = stripped.replace(base_suffix, suffix)
-            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            match = None
+            for possible in possibles:
+                if possible.endswith(aexpstr+suffix):
+                    if os.path.basename(possible).startswith(prefix):
+                        match = possible
             if match is not None:
                 mylog.info('discovered %s:%s', filetype, match)
                 setattr(self, "_file_"+filetype, match)

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ART frontend tests using SFG1 a=0.330
+ART frontend tests using D9p a=0.500
 
 
 
@@ -22,20 +22,22 @@
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
-_fields = ("Density", "particle_mass", ("all", "particle_position_x"))
+_fields = ("Temperature", "Density", "particle_mass", ("all", "particle_position_x"))
 
-sfg1 = "10MpcBox_csf512_a0.330.d"
+d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d"
 
-
- at requires_pf(sfg1, big_data=True)
-def test_sfg1():
-    pf = data_dir_load(sfg1)
-    yield assert_equal, str(pf), "10MpcBox_csf512_a0.330.d"
+ at requires_pf(d9p, big_data=True)
+def test_d9p():
+    pf = data_dir_load(d9p)
+    yield assert_equal, str(pf), "10MpcBox_HartGal_csf_a0.500.d"
+    for test in big_patch_amr(d9p, _fields):
+        test_d9p.__name__ = test.description
+        yield test
     dso = [None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
-                        sfg1, axis, field, weight_field,
+                        d9p, axis, field, weight_field,
                         ds)

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -177,10 +177,20 @@
             if self.dimensionality < 3:
                 dx[i].append(DRE[2] - DLE[1])
         self.level_dds = np.array(dx, dtype="float64")
+        coordinate_type = int(header_file.next())
+        if self.pf.geometry == "cartesian":
+            default_ybounds = (0.0, 1.0)
+            default_zbounds = (0.0, 1.0)
+        elif self.pf.geometry == "cylindrical":
+            # Now we check for dimensionality issues
+            if self.dimensionality != 2:
+                raise RuntimeError("yt needs cylindrical to be 2D")
+            self.level_dds[:,2] = 2*np.pi
+            default_zbounds = (0.0, 2*np.pi)
+        else:
+            raise RuntimeError("yt only supports cartesian and cylindrical coordinates.")
         if int(header_file.next()) != 0:
-            raise RunTimeError("yt only supports cartesian coordinates.")
-        if int(header_file.next()) != 0:
-            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+            raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
         # each level is one group with ngrids on it. 
         # each grid has self.dimensionality number of lines of 2 reals 
@@ -196,11 +206,11 @@
                 if self.dimensionality > 1:
                     ylo, yhi = [float(v) for v in header_file.next().split()]
                 else:
-                    ylo, yhi = 0.0, 1.0
+                    ylo, yhi = default_ybounds
                 if self.dimensionality > 2:
                     zlo, zhi = [float(v) for v in header_file.next().split()]
                 else:
-                    zlo, zhi = 0.0, 1.0
+                    zlo, zhi = default_zbounds
                 self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
                 self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
             # Now we get to the level header filename, which we open and parse.
@@ -569,6 +579,14 @@
         # Skip timesteps per level
         header_file.readline()
         self._header_mesh_start = header_file.tell()
+        header_file.next()
+        coordinate_type = int(header_file.next())
+        if coordinate_type == 0:
+            self.geometry = "cartesian"
+        elif coordinate_type == 1:
+            self.geometry = "cylindrical"
+        else:
+            raise RuntimeError("yt does not yet support spherical geometry")
 
         # overrides for 1/2-dimensional data
         if self.dimensionality == 1: 
@@ -597,12 +615,12 @@
         self.periodicity = ensure_tuple(tmp)
         
     def _setup2d(self):
-#        self._index_class = BoxlibHierarchy2D
-#        self._fieldinfo_fallback = Orion2DFieldInfo
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0]])
         self.domain_right_edge = \
             np.concatenate([self.domain_right_edge, [1.0]])
+        if self.geometry == "cylindrical":
+            self.domain_right_edge[2] = 2.0 * np.pi
         tmp = self.domain_dimensions.tolist()
         tmp.append(1)
         self.domain_dimensions = np.array(tmp)
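
Schematically, the header parsing added above maps the BoxLib coordinate_type
integer onto a yt geometry, with spherical still rejected:

    # Schematic of the mapping introduced above
    geometry = {0: "cartesian", 1: "cylindrical"}.get(coordinate_type)
    if geometry is None:
        raise RuntimeError("yt does not yet support spherical geometry")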

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -24,8 +24,6 @@
 mom_units = "code_mass * code_length / code_time"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
-
-
 def _thermal_energy_density(field, data):
     ke = 0.5 * ( data["momentum_x"]**2
                + data["momentum_y"]**2
@@ -49,6 +47,8 @@
         ("ymom", (mom_units, ["momentum_y"], None)),
         ("zmom", (mom_units, ["momentum_z"], None)),
         ("temperature", ("K", ["temperature"], None)),
+        ("x_velocity", ("cm/s", ["velocity_x"], None)),
+        ("y_velocity", ("cm/s", ["velocity_y"], None)),
     )
 
     known_particle_fields = (

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -19,8 +19,8 @@
       ChomboDataset
 
 from .fields import \
-      ChomboFieldInfo, \
-      add_chombo_field
+      ChomboFieldInfo
+add_chombo_field = ChomboFieldInfo.add_field
 
 from .io import \
       IOHandlerChomboHDF5

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -43,12 +43,15 @@
      mpc_conversion, sec_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_root_only
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
 from yt.utilities.io_handler import \
     io_registry
 
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import ChomboFieldInfo, KnownChomboFields
+from .fields import ChomboFieldInfo
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -104,7 +107,6 @@
         self._levels = self._handle.keys()[1:]
         GridIndex.__init__(self,pf,dataset_type)
         self._read_particles()
-        self._fhandle.close()
 
     def _read_particles(self):
         self.particle_filename = self.index_filename[:-4] + 'sink'
@@ -136,7 +138,7 @@
 
     def _detect_output_fields(self):
         ncomp = int(self._handle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._handle['/'].attrs.items()[-ncomp:]]
+        self.field_list = [("chombo", c[1]) for c in self._handle['/'].attrs.items()[-ncomp:]]
           
     def _count_grids(self):
         self.num_grids = 0
@@ -174,32 +176,38 @@
 #        self.grids = np.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
+        self._reconstruct_parent_child()
         for g in self.grids:
             g._prepare_grid()
             g._setup_dx()
-
-        for g in self.grids:
-            g.Children = self._get_grid_children(g)
-            for g1 in g.Children:
-                g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
 
 class ChomboDataset(Dataset):
     _index_class = ChomboHierarchy
-    _fieldinfo_fallback = ChomboFieldInfo
-    _fieldinfo_known = KnownChomboFields
+    _field_info_class = ChomboFieldInfo
 
     def __init__(self, filename, dataset_type='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
+        self.fluid_types += ("chombo",)
         self._handle = h5py.File(filename,'r')
         self.current_time = self._handle.attrs['time']
         self.ini_filename = ini_filename
@@ -216,34 +224,11 @@
     def __del__(self):
         self._handle.close()
 
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        self.units = {}
-        self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2chomboFieldsDict:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
+    def _set_code_unit_attributes(self):
+        self.length_unit = YTQuantity(1.0, "cm")
+        self.mass_unit = YTQuantity(1.0, "g")
+        self.time_unit = YTQuantity(1.0, "s")
+        self.velocity_unit = YTQuantity(1.0, "cm/s")
 
     def _localize(self, f, default):
         if f is None:

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -13,159 +13,71 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
 from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import numpy as np
+    FieldInfoContainer
+from yt.frontends.boxlib.fields import \
+    rho_units, \
+    mom_units, \
+    eden_units, \
+    _thermal_energy_density, \
+    _thermal_energy, \
+    _temperature
 
-KnownChomboFields = FieldInfoContainer()
-add_chombo_field = KnownChomboFields.add_field
+rho_units = "code_mass / code_length**3"
+mom_units = "code_mass * code_length / code_time"
+eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
-ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = ChomboFieldInfo.add_field
+# We duplicate everything here from Boxlib, because we want to be able to
+# subclass it and that can be somewhat tricky.
+class ChomboFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density"], None)),
+        ("energy-density", (eden_units, ["energy_density"], None)),
+        ("radiation-energy-density", (eden_units, ["radiation_energy_density"], None)),
+        ("X-momentum", (mom_units, ["momentum_x"], None)),
+        ("Y-momentum", (mom_units, ["momentum_y"], None)),
+        ("Z-momentum", (mom_units, ["momentum_z"], None)),
+        ("temperature", ("K", ["temperature"], None)),
+        ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
+        ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
+        ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
+    )
 
-add_chombo_field("density", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("density")],
-                 units="g/cm**3")
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_momentum_x", (mom_units, [], None)),
+        ("particle_momentum_y", (mom_units, [], None)),
+        ("particle_momentum_z", (mom_units, [], None)),
+        # Note that these are *internal* angular momenta ("angmomen")
+        ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
+        ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
+        ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
+        ("particle_mlast", ("code_mass", [], None)),
+        ("particle_r", ("code_length", [], None)),
+        ("particle_mdeut", ("code_mass", [], None)),
+        ("particle_n", ("", [], None)),
+        ("particle_mdot", ("code_mass/code_time", [], None)),
+        ("particle_burnstate", ("", [], None)),
+        ("particle_luminosity", ("", [], None)),
+        ("particle_id", ("", ["particle_index"], None)),
+    )
 
-add_chombo_field("X-momentum", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("X-Momentum")],
-                 units="g/cm**2/s",display_name=r"M_x")
-
-add_chombo_field("Y-momentum", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("Y-Momentum")],
-                 units="g/cm**2/s",display_name=r"M_y")
-
-add_chombo_field("Z-momentum", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("Z-Momentum")],
-                 units="g/cm**2/s",display_name=r"M_z")
-
-add_chombo_field("X-magnfield", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("X-Magnfield")],
-                 units="gauss",display_name=r"B_x")
-
-add_chombo_field("Y-magnfield", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("Y-Magnfield")],
-                 units="gauss",display_name=r"B_y")
-
-add_chombo_field("Z-magnfield", function=NullFunc, take_log=False,
-                  validators = [ValidateDataField("Z-Magnfield")],
-                  units="gauss",display_name=r"B_z")
-
-add_chombo_field("energy-density", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("energy-density")],
-                 units="erg/cm**3")
-
-add_chombo_field("radiation-energy-density", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("radiation-energy-density")],
-                 units="erg/cm**3")
-
-def _Density(field,data):
-    """A duplicate of the density field. This is needed because when you try 
-    to instantiate a PlotCollection without passing in a center, the code
-    will try to generate one for you using the "Density" field, which gives an error 
-    if it isn't defined.
-
-    """
-    return data["density"]
-add_field("Density",function=_Density, take_log=True,
-          units='g/cm**3')
-
-def _Bx(field,data):
-    return data["X-magnfield"]
-add_field("Bx", function=_Bx, take_log=False,
-          units="gauss", display_name=r"B_x")
-
-def _By(field,data):
-    return data["Y-magnfield"]
-add_field("By", function=_By, take_log=False,
-          units="gauss", display_name=r"B_y")
-
-def _Bz(field,data):
-    return data["Z-magnfield"]
-add_field("Bz", function=_Bz, take_log=False,
-          units="gauss", display_name=r"B_z")
-
-def _MagneticEnergy(field,data):
-    return (data["X-magnfield"]**2 +
-            data["Y-magnfield"]**2 +
-            data["Z-magnfield"]**2)/2.
-add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
-          units=r"erg/cm**3", display_name=r"B^2 / 8 \pi")
-
-def _xVelocity(field, data):
-    """ Generate x-velocity from x-momentum and density. """
-    return data["X-momentum"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units='cm/s')
-
-def _yVelocity(field,data):
-    """ Generate y-velocity from y-momentum and density. """
-    #try:
-    #    return data["xvel"]
-    #except KeyError:
-    return data["Y-momentum"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units='cm/s')
-
-def _zVelocity(field,data):
-    """ Generate z-velocity from z-momentum and density. """
-    return data["Z-momentum"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units='cm/s')
-
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.index.io
-        if not data.NumberOfParticles > 0:
-            return np.array([], dtype=dtype)
-        else:
-            return io._read_particles(data, p_field).astype(dtype)
-        
-    return _Particles
-
-_particle_field_list = ["mass",
-                        "position_x",
-                        "position_y",
-                        "position_z",
-                        "momentum_x",
-                        "momentum_y",
-                        "momentum_z",
-                        "angmomen_x",
-                        "angmomen_y",
-                        "angmomen_z",
-                        "mlast",
-                        "r",
-                        "mdeut",
-                        "n",
-                        "mdot",
-                        "burnstate",
-                        "luminosity",
-                        "id"]
-
-for pf in _particle_field_list:
-    pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles
-
-def _ParticleMassMsun(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles/1.989e33
-
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True)
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True)
+    def setup_fluid_fields(self):
+        def _get_vel(axis):
+            # Close over the requested axis and return the derived-field function
+            def velocity(field, data):
+                return data["%smom" % axis]/data["density"]
+            return velocity
+        for ax in 'xyz':
+            self.add_field("velocity_%s" % ax, function = _get_vel(ax),
+                           units = "cm/s")
+        self.add_field("thermal_energy",
+                       function = _thermal_energy,
+                       units = "erg/g")
+        self.add_field("thermal_energy_density",
+                       function = _thermal_energy_density,
+                       units = "erg/cm**3")
+        self.add_field("temperature", function=_temperature,
+                       units="K")

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -16,6 +16,7 @@
 import os
 import re
 import numpy as np
+from yt.utilities.logger import ytLogger as mylog
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -26,8 +27,7 @@
     _data_string = 'data:datatype=0'
 
     def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
+        BaseIOHandler.__init__(self, pf)
         self._handle = pf._handle
 
     _field_dict = None
@@ -61,6 +61,52 @@
         
         return data.reshape(dims, order='F')
 
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        chunks = list(chunks)
+        fields.sort(key=lambda a: self.field_dict[a[1]])
+        if selector.__class__.__name__ == "GridSelector":
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            grid = chunks[0].objs[0]
+            lstring = 'level_%i' % grid.Level
+            lev = self._handle[lstring]
+            grid_offset = lev[self._offset_string][grid._level_id]
+            boxsize = grid.ActiveDimensions.prod()
+            for ftype, fname in fields:
+                start = grid_offset+self.field_dict[fname]*boxsize
+                stop = start + boxsize
+                data = lev[self._data_string][start:stop]
+                rv[ftype, fname] = data.reshape(grid.ActiveDimensions,
+                                        order='F')
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            for g in chunk.objs:
+                lstring = 'level_%i' % g.Level
+                lev = self._handle[lstring]
+                grid_offset = lev[self._offset_string][g._level_id]
+                boxsize = g.ActiveDimensions.prod()
+                nd = 0
+                for field in fields:
+                    ftype, fname = field
+                    start = grid_offset+self.field_dict[fname]*boxsize
+                    stop = start + boxsize
+                    data = lev[self._data_string][start:stop]
+                    data = data.reshape(g.ActiveDimensions, order='F')
+                    nd = g.select(selector, data, rv[field], ind) # caches
+                ind += nd
+        return rv
+
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -38,7 +38,7 @@
     mass_sun_cgs
 from yt.utilities.cosmology import Cosmology
 from .fields import \
-    SPHFieldInfo
+    SPHFieldInfo, OWLSFieldInfo
 from .definitions import \
     gadget_header_specs, \
     gadget_field_specs, \
@@ -284,6 +284,7 @@
 
 class OWLSDataset(GadgetHDF5Dataset):
     _particle_mass_name = "Mass"
+    _field_info_class = OWLSFieldInfo
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -23,6 +23,10 @@
     gadget_ptypes, \
     ghdf5_ptypes
 
+from yt.fields.species_fields import add_species_field_by_fraction
+
+
+
 # Here are helper functions for things like vector fields and so on.
 
 def _get_conv(cf):
@@ -50,3 +54,40 @@
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
     )
+
+
+
+
+class OWLSFieldInfo(SPHFieldInfo):
+
+    _species_fractions = ['H_fraction', 'He_fraction', 'C_fraction',
+                          'N_fraction', 'O_fraction', 'Ne_fraction',
+                          'Mg_fraction', 'Si_fraction', 'Fe_fraction']
+
+    # override
+    #--------------------------------------------------------------
+    def __init__(self, *args, **kwargs):
+        
+        new_particle_fields = (
+            ('Hydrogen', ('', ['H_fraction'], None)),
+            ('Helium', ('', ['He_fraction'], None)),
+            ('Carbon', ('', ['C_fraction'], None)),
+            ('Nitrogen', ('', ['N_fraction'], None)),
+            ('Oxygen', ('', ['O_fraction'], None)),
+            ('Neon', ('', ['Ne_fraction'], None)),
+            ('Magnesium', ('', ['Mg_fraction'], None)),
+            ('Silicon', ('', ['Si_fraction'], None)),
+            ('Iron', ('', ['Fe_fraction'], None))
+            )
+
+        self.known_particle_fields += new_particle_fields
+        
+        super(OWLSFieldInfo,self).__init__( *args, **kwargs )
+
+
+        
+    def setup_fluid_fields(self):
+        # here species_name is "H", "He", etc
+        for s in self._species_fractions:
+            species_name = s.split('_')[0]
+            add_species_field_by_fraction(self, "gas", species_name)
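
Given the OWLS setup above, the per-species derived fields become addressable
on gas data once a dataset is loaded. A hedged sketch, with a hypothetical
snapshot name:

    # ("gas", "O_density") is derived from the Oxygen mass fraction
    # by add_species_field_by_fraction.
    pf = OWLSDataset("snap_033.0.hdf5")
    dd = pf.h.all_data()
    o_rho = dd["gas", "O_density"]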

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import glob
 import h5py
 import numpy as np
 from .definitions import gadget_ptypes, ghdf5_ptypes
@@ -52,6 +53,9 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
     _known_ptypes = ghdf5_ptypes
     _var_mass = None
+    _element_fields = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
+                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
+
 
     @property
     def var_mass(self):
@@ -99,13 +103,20 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
+                    
                     if field in ("Mass", "Masses") and \
                         ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
                         ind = self._known_ptypes.index(ptype) 
                         data[:] = self.pf["Massarr"][ind]
+
+                    elif field in self._element_fields:
+                        rfield = 'ElementAbundance/' + field
+                        data = g[rfield][:][mask,...]
+
                     else:
                         data = g[field][:][mask,...]
+
                     yield (ptype, field), data
             f.close()
 
@@ -143,24 +154,46 @@
         npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) 
         return npart
 
+
     def _identify_fields(self, data_file):
         f = _get_h5_handle(data_file.filename)
         fields = []
-        cname = self.pf._particle_coordinates_name
-        mname = self.pf._particle_mass_name
-        for key in f.keys():
+        cname = self.pf._particle_coordinates_name  # Coordinates
+        mname = self.pf._particle_mass_name  # Mass
+
+        # loop over all keys in OWLS hdf5 file
+        #--------------------------------------------------
+        for key in f.keys():   
+
+            # only want particle data
+            #--------------------------------------
             if not key.startswith("PartType"): continue
+
+            # particle data group
+            #--------------------------------------
             g = f[key]
             if cname not in g: continue
+
+            # note str => not unicode!
+
             #ptype = int(key[8:])
             ptype = str(key)
+
+            # loop over all keys in PartTypeX group
+            #----------------------------------------
             for k in g.keys():
-                if not hasattr(g[k], "shape"): continue
-                # str => not unicode!
-                fields.append((ptype, str(k)))
-            if mname not in g.keys():
-                # We'll append it anyway.
-                fields.append((ptype, mname))
+
+                if k == 'ElementAbundance':
+                    gp = g[k]
+                    for j in gp.keys():
+                        kk = j
+                        fields.append((ptype, str(kk)))
+                else:
+                    kk = k
+                    if not hasattr(g[kk], "shape"): continue
+                    fields.append((ptype, str(kk)))
+
+
         f.close()
         return fields, {}
 
@@ -357,6 +390,7 @@
                 "DarkMatter",
                 "Stars" )
 
+    _aux_fields = []
     _fields = ( ("Gas", "Mass"),
                 ("Gas", "Coordinates"),
                 ("Gas", "Velocities"),
@@ -382,7 +416,50 @@
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
-    def _fill_fields(self, fields, vals, mask):
+    def _read_aux_fields(self, field, mask, data_file):
+        """
+        Read in auxiliary files from gasoline/pkdgrav 
+        """
+        filename = data_file.filename+'.'+field
+        dtype = None
+        # We need to do some fairly ugly detection to see what format the auxiliary
+        # files are in.  They can be either ascii or binary, and the binary files can be
+        # either floats, ints, or doubles.  We're going to use a try-catch cascade to 
+        # determine the format.
+        try:#ASCII
+            auxdata = np.genfromtxt(filename, skip_header=1)
+            if auxdata.size != np.sum(data_file.total_particles.values()):
+                print "Error reading auxiliary tipsy file"
+                raise RuntimeError 
+        except ValueError:#binary/xdr
+            f = open(filename, 'rb')
+            l = struct.unpack(data_file.pf.endian+"i", f.read(4))[0]
+            if l != np.sum(data_file.total_particles.values()):
+                print "Error reading auxiliary tipsy file"
+                raise RuntimeError
+            dtype = 'd'
+            if field in ('iord', 'igasorder', 'grp'):#These fields are integers
+                dtype = 'i'
+            try:# If we try loading doubles by default, we can catch an exception and try floats next
+                auxdata = np.array(struct.unpack(data_file.pf.endian+(l*dtype), f.read()))
+            except struct.error:
+                f.seek(4)
+                dtype = 'f'
+                try:
+                    auxdata = np.array(struct.unpack(data_file.pf.endian+(l*dtype), f.read()))
+                except struct.error: # None of the binary attempts to read succeeded
+                    print "Error reading auxiliary tipsy file"
+                    raise RuntimeError
+            
+        # Use the mask to slice out the appropriate particle type data
+        if mask.size == data_file.total_particles['Gas']:
+            return auxdata[:data_file.total_particles['Gas']]
+        elif mask.size == data_file.total_particles['DarkMatter']:
+            ngas = data_file.total_particles['Gas']
+            return auxdata[ngas:ngas + data_file.total_particles['DarkMatter']]
+        else:
+            return auxdata[-data_file.total_particles['Stars']:]
+
+    def _fill_fields(self, fields, vals, mask, data_file):
         if mask is None:
             size = 0
         else:
@@ -390,7 +467,9 @@
         rv = {}
         for field in fields:
             mylog.debug("Allocating %s values for %s", size, field)
-            if field in self._vector_fields:
+            if field in self._aux_fields: #Read each of the auxiliary fields
+                rv[field] = self._read_aux_fields(field, mask, data_file)
+            elif field in self._vector_fields:
                 rv[field] = np.empty((size, 3), dtype="float64")
                 if size == 0: continue
                 rv[field][:,0] = vals[field]['x'][mask]
@@ -408,6 +487,7 @@
                       self.domain_right_edge[i] - eps)
         return rv
 
+
     def _read_particle_coords(self, chunks, ptf):
         data_files = set([])
         for chunk in chunks:
@@ -443,7 +523,7 @@
                     p["Coordinates"]['y'].astype("float64"),
                     p["Coordinates"]['z'].astype("float64"))
                 if mask is None: continue
-                tf = self._fill_fields(field_list, p, mask)
+                tf = self._fill_fields(field_list, p, mask, data_file)
                 for field in field_list:
                     yield (ptype, field), tf.pop(field)
             f.close()
@@ -510,6 +590,8 @@
         pds = {}
         field_list = []
         tp = data_file.total_particles
+        aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
+        self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
@@ -523,6 +605,12 @@
             field_list.append((ptype, field))
         for ptype in pds:
             self._pdtypes[ptype] = np.dtype(pds[ptype])
+        if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
+            field_list += [("Gas",a) for a in self._aux_fields] 
+        if any(["DarkMatter"==f[0] for f in field_list]):
+            field_list += [("DarkMatter",a) for a in self._aux_fields] 
+        if any(["Stars"==f[0] for f in field_list]):
+            field_list += [("Stars",a) for a in self._aux_fields] 
         self._field_list = field_list
         return self._field_list
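
For readers skimming the hunk above: the detection cascade can be
condensed into a standalone sketch.  This is hedged -- sniff_aux_format
is a stand-in name, and the filename, particle count, and endian
character are arguments where the real method reads them from
data_file:

    import struct
    import numpy as np

    def sniff_aux_format(filename, n_particles, endian='<'):
        # ASCII first: np.genfromtxt raises ValueError on binary input.
        try:
            data = np.genfromtxt(filename, skip_header=1)
            if data.size != n_particles:
                raise RuntimeError("Error reading auxiliary tipsy file")
            return data
        except ValueError:
            pass
        # Binary/XDR: a leading int32 particle count, then a payload of
        # doubles or floats (integer fields are special-cased by name).
        with open(filename, 'rb') as f:
            count = struct.unpack(endian + 'i', f.read(4))[0]
            if count != n_particles:
                raise RuntimeError("Error reading auxiliary tipsy file")
            for fmt in ('d', 'f'):
                f.seek(4)  # rewind to the start of the payload
                try:
                    return np.array(struct.unpack(
                        endian + str(count) + fmt, f.read()))
                except struct.error:
                    continue
        raise RuntimeError("Error reading auxiliary tipsy file")

The three mask-size branches at the end of _read_aux_fields rely on
tipsy ordering: gas particles first, then dark matter, then stars.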
 

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -605,6 +605,8 @@
     else:
         registry = None
     if isinstance(length, YTArray):
+        if registry is not None:
+            length.units.registry = registry
         return length.in_units("code_length")
     if isinstance(length, numeric_type):
         return YTArray(length, 'code_length', registry=registry)
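
The two added lines matter because converting to "code_length" needs
the dataset's unit registry, which a YTArray created elsewhere may not
carry.  A hedged sketch of the effect (pf is a loaded dataset; that it
exposes the registry as pf.unit_registry is an assumption here):

    from yt.units.yt_array import YTArray

    length = YTArray(1.0, 'code_length')       # no dataset registry yet
    length.units.registry = pf.unit_registry   # what the fix does (assumed attr)
    length.in_units('code_length')             # now resolves code units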

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -17,7 +17,8 @@
 import numpy as np
 from .coordinate_handler import \
     CoordinateHandler, \
-    _unknown_coord
+    _unknown_coord, \
+    _get_coord_fields
 
 class CartesianCoordinateHandler(CoordinateHandler):
 
@@ -25,16 +26,8 @@
         super(CartesianCoordinateHandler, self).__init__(pf)
 
     def setup_fields(self, registry):
-        def _get_coord_fields(axi, ax):
-            def _dds(field, data):
-                rv = data.pf.arr(data.fwidth[...,axi], 'code_length')
-                return data._reshape_vals(rv)
-            def _coords(field, data):
-                rv = data.pf.arr(data.fcoords[...,axi], 'code_length')
-                return data._reshape_vals(rv)
-            return _dds, _coords
         for axi, ax in enumerate('xyz'):
-            f1, f2 = _get_coord_fields(axi, ax)
+            f1, f2 = _get_coord_fields(axi)
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -32,6 +32,15 @@
 def _unknown_coord(field, data):
     raise YTCoordinateNotImplemented
 
+def _get_coord_fields(axi, units = "code_length"):
+    def _dds(field, data):
+        rv = data.pf.arr(data.fwidth[...,axi], units)
+        return data._reshape_vals(rv)
+    def _coords(field, data):
+        rv = data.pf.arr(data.fcoords[...,axi], units)
+        return data._reshape_vals(rv)
+    return _dds, _coords
+
 class CoordinateHandler(object):
     
     def __init__(self, pf):
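
The factory above hands back a (cell-width, cell-center) pair of field
functions for one axis index; the cartesian hunk earlier shows the
intended call pattern, repeated here as a sketch:

    f1, f2 = _get_coord_fields(0)          # axis 0, default "code_length"
    registry.add_field(("index", "dx"), function=f1,
                       display_field=False, units="code_length")
    registry.add_field(("index", "x"), function=f2,
                       display_field=False, units="code_length")

The optional units argument lets angular coordinates opt out of length
units, which the cylindrical handler below uses for theta via
_get_coord_fields(2, "").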

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/geometry/cylindrical_coordinates.py
--- a/yt/geometry/cylindrical_coordinates.py
+++ b/yt/geometry/cylindrical_coordinates.py
@@ -18,8 +18,8 @@
 from yt.units.yt_array import YTArray
 from .coordinate_handler import \
     CoordinateHandler, \
-    _unknown_coord
-
+    _unknown_coord, \
+    _get_coord_fields
 #
 # Cylindrical fields
 #
@@ -36,54 +36,29 @@
         registry.add_field(("index", "dy"), function=_unknown_coord)
         registry.add_field(("index", "x"), function=_unknown_coord)
         registry.add_field(("index", "y"), function=_unknown_coord)
+        f1, f2 = _get_coord_fields(0)
+        registry.add_field(("index", "dr"), function = f1,
+                           display_field = False,
+                           units = "code_length")
+        registry.add_field(("index", "r"), function = f2,
+                           display_field = False,
+                           units = "code_length")
 
-        def _dr(field, data):
-            return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[0]
-        registry.add_field(("index", "dr"),
-                 function=_dr,
-                 display_field=False,
-                 validators=[ValidateSpatial(0)])
+        f1, f2 = _get_coord_fields(1)
+        registry.add_field(("index", "dz"), function = f1,
+                           display_field = False,
+                           units = "code_length")
+        registry.add_field(("index", "z"), function = f2,
+                           display_field = False,
+                           units = "code_length")
 
-        def _dz(field, data):
-            return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[1]
-        registry.add_field(("index", "dz"),
-                 function=_dz,
-                 display_field=False,
-                 validators=[ValidateSpatial(0)])
-
-        def _dtheta(field, data):
-            return np.ones(data.ActiveDimensions, dtype='float64') * data.dds[2]
-        registry.add_field(("index", "dtheta"),
-                 function=_dtheta,
-                 display_field=False,
-                 validators=[ValidateSpatial(0)])
-
-        def _coordR(field, data):
-            dim = data.ActiveDimensions[0]
-            return (np.ones(data.ActiveDimensions, dtype='float64')
-                           * np.arange(data.ActiveDimensions[0])[:,None,None]
-                    +0.5) * data["index", "dr"] + data.LeftEdge[0]
-        registry.add_field(("index", "r"),
-                 function=_coordR, display_field=False,
-                 validators=[ValidateSpatial(0)])
-
-        def _coordZ(field, data):
-            dim = data.ActiveDimensions[1]
-            return (np.ones(data.ActiveDimensions, dtype='float64')
-                           * np.arange(data.ActiveDimensions[1])[None,:,None]
-                    +0.5) * data["index", "dz"] + data.LeftEdge[1]
-        registry.add_field(("index", "z"),
-                 function=_coordZ, display_field=False,
-                 validators=[ValidateSpatial(0)])
-
-        def _coordTheta(field, data):
-            dim = data.ActiveDimensions[2]
-            return (np.ones(data.ActiveDimensions, dtype='float64')
-                           * np.arange(data.ActiveDimensions[2])[None,None,:]
-                    +0.5) * data["index", "dtheta"] + data.LeftEdge[2]
-        registry.add_field(("index", "theta"),
-                 function=_coordTheta, display_field=False,
-                 validators=[ValidateSpatial(0)])
+        f1, f2 = _get_coord_fields(2, "")
+        registry.add_field(("index", "dtheta"), function = f1,
+                           display_field = False,
+                           units = "")
+        registry.add_field(("index", "theta"), function = f2,
+                           display_field = False,
+                           units = "")
 
         def _CylindricalVolume(field, data):
             return data["index", "dtheta"] \
@@ -91,7 +66,8 @@
                  * data["index", "dr"] \
                  * data["index", "dz"]
         registry.add_field(("index", "cell_volume"),
-                 function=_CylindricalVolume)
+                 function=_CylindricalVolume,
+                 units = "code_length**3")
 
 
     def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -145,7 +145,7 @@
         ndims, peak_value = 1.0,
         fields = ("density", "velocity_x", "velocity_y", "velocity_z"),
         units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
-        negative = False, nprocs = 1, particles = 0):
+        negative = False, nprocs = 1, particles = 0, length_unit=1.0):
     from yt.data_objects.api import data_object_registry
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
@@ -175,7 +175,7 @@
             data[f] = (np.random.random(size = particles) - 0.5, 'cm/s')
         data['particle_mass'] = (np.random.random(particles), 'g')
         data['number_of_particles'] = particles
-    ug = load_uniform_grid(data, ndims, 1.0, nprocs=nprocs)
+    ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
     return ug
 
 def fake_amr_pf(fields = ("Density",)):
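
A hedged usage sketch of the new keyword -- the test_ytarray.py change
just below exercises exactly this (values illustrative):

    from yt.testing import fake_random_pf

    pf = fake_random_pf(64, nprocs=1, length_unit=10)  # 1 code_length == 10 cm
    pf.quan(1.0, 'code_length').in_units('cm')         # -> 10 cm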

diff -r 031ac7fd776e22564a3f7595648c8d9de4142e3b -r b0a2e2d086e037a90afcdff3a013848e141d2c39 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -391,10 +391,10 @@
     """
     Test fixing the length of an array. Used in spheres and other data objects
     """
-    pf = fake_random_pf(64, nprocs=1)
+    pf = fake_random_pf(64, nprocs=1, length_unit=10)
     length = pf.quan(1.0,'code_length')
     new_length = fix_length(length, pf=pf)
-    yield assert_equal, length, new_length
+    yield assert_equal, YTQuantity(10, 'cm'), new_length
 
 def test_ytarray_pickle():
     pf = fake_random_pf(64, nprocs=1)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/da074301bf0e/
Changeset:   da074301bf0e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 05:21:31
Summary:     Merging with fix
Affected #:  1 file

diff -r b0a2e2d086e037a90afcdff3a013848e141d2c39 -r da074301bf0e09c14d966c40338f1729fba0c1e1 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -177,7 +177,7 @@
             if self.dimensionality < 3:
                 dx[i].append(DRE[2] - DLE[1])
         self.level_dds = np.array(dx, dtype="float64")
-        coordinate_type = int(header_file.next())
+        header_file.next()
         if self.pf.geometry == "cartesian":
             default_ybounds = (0.0, 1.0)
             default_zbounds = (0.0, 1.0)
@@ -580,7 +580,11 @@
         header_file.readline()
         self._header_mesh_start = header_file.tell()
         header_file.next()
-        coordinate_type = int(header_file.next())
+        next_line = header_file.next()
+        if len(next_line.split()) == 1:
+            coordinate_type = int(next_line)
+        else:
+            coordinate_type = 0
         if coordinate_type == 0:
             self.geometry = "cartesian"
         elif coordinate_type == 1:


https://bitbucket.org/yt_analysis/yt/commits/1111cbf28dfc/
Changeset:   1111cbf28dfc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 06:27:52
Summary:     Fixing bootstrap issue.
Affected #:  1 file

diff -r da074301bf0e09c14d966c40338f1729fba0c1e1 -r 1111cbf28dfc6d103e6935e3cef6f537c60bbf9e yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -2,8 +2,6 @@
 import setuptools
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
-from yt.utilities.setup import \
-    check_for_dependencies
 
 def check_for_openmp():
     # Create a temporary directory


https://bitbucket.org/yt_analysis/yt/commits/59bba5b698b8/
Changeset:   59bba5b698b8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 14:56:07
Summary:     Moving run_nose to yt.testing.
Affected #:  2 files

diff -r da074301bf0e09c14d966c40338f1729fba0c1e1 -r 59bba5b698b85748db0b6b15db78d8c17396a498 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -624,3 +624,25 @@
         return _func
     return compare_results(func)
 
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys, yt
+    from yt.funcs import mylog
+    orig_level = mylog.getEffectiveLevel()
+    mylog.setLevel(50)
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(yt.__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        mylog.setLevel(orig_level)

diff -r da074301bf0e09c14d966c40338f1729fba0c1e1 -r 59bba5b698b85748db0b6b15db78d8c17396a498 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -47,26 +47,3 @@
     from .framework import AnswerTesting
 except ImportError:
     raise
-
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys, yt
-    from yt.funcs import mylog
-    orig_level = mylog.getEffectiveLevel()
-    mylog.setLevel(50)
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(yt.__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        mylog.setLevel(orig_level)


https://bitbucket.org/yt_analysis/yt/commits/958a0486151a/
Changeset:   958a0486151a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 14:57:04
Summary:     Merge
Affected #:  1 file

diff -r 59bba5b698b85748db0b6b15db78d8c17396a498 -r 958a0486151a1e8439bf078556649d2451c0f18e yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -2,8 +2,6 @@
 import setuptools
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
-from yt.utilities.setup import \
-    check_for_dependencies
 
 def check_for_openmp():
     # Create a temporary directory


https://bitbucket.org/yt_analysis/yt/commits/fec39112bec2/
Changeset:   fec39112bec2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 17:40:05
Summary:     Enable parallelism when requested.

yt.enable_parallelism() now works; all of the logic for enabling
parallelism and reporting that it has been enabled lives in that
function.
Affected #:  3 files
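
A hedged usage sketch (the script is assumed to be launched under an
MPI runner, e.g. mpirun -np 4 python script.py):

    import yt

    yt.enable_parallelism()  # False under one process, True otherwise

As the parallel_analysis_interface hunk below shows, the function
returns False when MPI.COMM_WORLD.size is 1, so serial scripts can
call it unconditionally.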

diff -r 958a0486151a1e8439bf078556649d2451c0f18e -r fec39112bec2c1ee426ea154b6c6b3a627ec2d02 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -146,7 +146,7 @@
     off_axis_projection
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects
+    parallel_objects, enable_parallelism
 
 from yt.convenience import \
     load, simulation
@@ -155,4 +155,3 @@
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
 
-

diff -r 958a0486151a1e8439bf078556649d2451c0f18e -r fec39112bec2c1ee426ea154b6c6b3a627ec2d02 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -24,35 +24,18 @@
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
 def turn_on_parallelism():
+    parallel_capable = False
     try:
         from mpi4py import MPI
     except ImportError as e:
         mylog.error("Warning: Attempting to turn on parallelism, " +
                     "but mpi4py import failed. Try pip install mpi4py.")
         raise e
-    parallel_capable = (MPI.COMM_WORLD.size > 1)
-    if parallel_capable:
-        mylog.info("Global parallel computation enabled: %s / %s",
-                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel"] = "True"
-        if exe_name == "embed_enzo" or \
-            ("_parallel" in dir(sys) and sys._parallel == True):
-            ytcfg["yt","inline"] = "True"
-        # I believe we do not need to turn this off manually
-        #ytcfg["yt","StoreParameterFiles"] = "False"
-        # Now let's make sure we have the right options set.
-        if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","LogFile"):
-                ytcfg["yt","LogFile"] = "False"
-                yt.utilities.logger.disable_file_logging()
         # Now we have to turn on the parallelism from the perspective of the
         # parallel_analysis_interface
-        from yt.utilities.parallel_tools.parallel_analysis_interface import \
-            enable_parallelism
-
-        enable_parallelism()
+    from yt.utilities.parallel_tools.parallel_analysis_interface import \
+        enable_parallelism
+    parallel_capable = enable_parallelism()
     return parallel_capable
 
 # This fallback is for Paraview:

diff -r 958a0486151a1e8439bf078556649d2451c0f18e -r fec39112bec2c1ee426ea154b6c6b3a627ec2d02 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -48,11 +48,24 @@
 
 # Set up translation table and import things
 
+exe_name = os.path.basename(sys.executable)
 def enable_parallelism():
     global parallel_capable
-    parallel_capable = ytcfg.getboolean("yt", "__parallel")
+    from mpi4py import MPI
+    parallel_capable = (MPI.COMM_WORLD.size > 1)
     if not parallel_capable: return False
-    from mpi4py import MPI
+    mylog.info("Global parallel computation enabled: %s / %s",
+               MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+    ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+    ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
+    ytcfg["yt","__parallel"] = "True"
+    if exe_name == "embed_enzo" or \
+        ("_parallel" in dir(sys) and sys._parallel == True):
+        ytcfg["yt","inline"] = "True"
+    if MPI.COMM_WORLD.rank > 0:
+        if ytcfg.getboolean("yt","LogFile"):
+            ytcfg["yt","LogFile"] = "False"
+            yt.utilities.logger.disable_file_logging()
     yt.utilities.logger.uncolorize_logging()
     # Even though the uncolorize function already resets the format string,
     # we reset it again so that it includes the processor.
@@ -77,6 +90,7 @@
         min = MPI.MIN,
         max = MPI.MAX
     ))
+    return True
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.


https://bitbucket.org/yt_analysis/yt/commits/dd346818141e/
Changeset:   dd346818141e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-25 18:37:47
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #729)

YTEP-0019
Affected #:  12 files

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -74,25 +74,84 @@
 
 __version__ = "3.0-dev"
 
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys
-    from yt.config import ytcfg
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg.set("yt","suppressStreamLogging", 'True')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
+# First module imports
+import numpy as np # For modern purposes
+import numpy # In case anyone wishes to use it by name
+
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
+from yt.utilities.logger import ytLogger as mylog
+
+import yt.utilities.physical_constants as physical_constants
+import yt.units as units
+from yt.units.yt_array import YTArray, YTQuantity
+
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
+
+from yt.data_objects.api import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    DatasetSeries, \
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
+
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
+
+from yt.frontends.stream.api import \
+    load_uniform_grid, load_amr_grids, \
+    load_particles, load_hexahedral_mesh, load_octree
+
+# For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
+GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
+TipsyStaticOutput = deprecated_class(TipsyDataset)
+
+# Now individual component imports from the visualization API
+from yt.visualization.api import \
+    PlotCollection, PlotCollectionInteractive, \
+    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
+    apply_colormap, scale_image, write_projection, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps, ProfilePlot, PhasePlot
+
+from yt.visualization.volume_rendering.api import \
+    off_axis_projection
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects, enable_parallelism
+
+from yt.convenience import \
+    load, simulation
+
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles, periodic_position
+

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -7,7 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('index_subset', parent_package, top_path)
+    config = Configuration('hierarchy_subset', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -12,3 +12,29 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import sys, types, os, glob, cPickle, time, importlib
+
+_frontends = [
+    'art',
+    'artio',
+    'athena',
+    'boxlib',
+    'chombo',
+    'enzo',
+    'fits',
+    'flash',
+    'gdf',
+    'halo_catalogs',
+    'moab',
+    #'pluto',
+    'ramses',
+    'sph',
+    'stream',
+]
+
+class _frontend_container:
+    def __init__(self):
+        for frontend in _frontends:
+            _mod = "yt.frontends.%s.api" % frontend
+            setattr(self, frontend, importlib.import_module(_mod))
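
With the container bound as yt.frontends in yt/__init__.py (see the
hunk above), frontend classes are reached by attribute access instead
of bulk imports; a hedged sketch:

    import yt

    yt.frontends.enzo.EnzoDataset   # module loaded via importlib
    yt.frontends.sph.TipsyDataset   # same class the compat shim wraps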

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -727,3 +727,14 @@
         return cls(*args, **kwargs)
     return _func
     
+def enable_plugins():
+    from yt.config import ytcfg
+    my_plugin_name = ytcfg.get("yt","pluginfilename")
+    # We assume that it is with respect to the $HOME/.yt directory
+    if os.path.isfile(my_plugin_name):
+        _fn = my_plugin_name
+    else:
+        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+    if os.path.isfile(_fn):
+        mylog.info("Loading plugins from %s", _fn)
+        execfile(_fn)
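
Plugin loading is now callable from plain yt as well; a hedged sketch:

    import yt

    yt.enable_plugins()   # loads the ~/.yt plugin file via execfile

yt.mods still invokes this automatically when loadfieldplugins is set,
as the mods.py hunk below shows.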

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -20,11 +20,8 @@
 # ALL IMPORTS GO HERE
 #
 
-# First module imports
-import sys, types, os, glob, cPickle, time
-import numpy as na # For historical reasons
-import numpy as np # For modern purposes
-import numpy # In case anyone wishes to use it by name
+import os
+from yt import *
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
@@ -35,13 +32,7 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.performance_counters import yt_counters, time_function
 from yt.config import ytcfg, ytcfg_defaults
-import yt.utilities.physical_constants as physical_constants
-import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
 
 from yt.utilities.logger import level as __level
 if __level >= int(ytcfg_defaults["loglevel"]):
@@ -49,134 +40,6 @@
     mylog.debug("Turning off NumPy error reporting")
     np.seterr(all = 'ignore')
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
-
-from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    data_object_registry, \
-    DatasetSeries, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile, \
-    Profile1D, Profile2D, Profile3D
-
-from yt.frontends.enzo.api import \
-    EnzoDataset, EnzoDatasetInMemory, \
-    EnzoSimulation, EnzoFieldInfo, add_enzo_field
-
-# Boxlib stuff
-from yt.frontends.boxlib.api import \
-    BoxlibDataset
-
-# Orion stuff
-#from yt.frontends.boxlib.api import \
-#    OrionDataset, OrionFieldInfo, add_orion_field
-
-# Maestro stuff
-#from yt.frontends.boxlib.api import \
-#    MaestroDataset
-
-# Castro stuff
-#from yt.frontends.boxlib.api import \
-#    CastroDataset
-
-from yt.frontends.flash.api import \
-    FLASHDataset, FLASHFieldInfo
-
-from yt.frontends.artio.api import \
-    ARTIODataset, ARTIOFieldInfo
-
-from yt.frontends.ramses.api import \
-    RAMSESDataset, RAMSESFieldInfo
-
-from yt.frontends.halo_catalogs.api import \
-    HaloCatalogDataset, HaloCatalogFieldInfo, \
-    RockstarDataset, RockstarFieldInfo
-
-from yt.frontends.chombo.api import \
-    ChomboDataset, ChomboFieldInfo, add_chombo_field
-
-from yt.frontends.gdf.api import \
-    GDFDataset, GDFFieldInfo, add_gdf_field
-
-from yt.frontends.moab.api import \
-    MoabHex8Dataset, MoabFieldInfo, \
-    PyneMoabHex8Dataset, PyneFieldInfo
-
-from yt.frontends.athena.api import \
-    AthenaDataset, AthenaFieldInfo
-
-from yt.frontends.art.api import \
-    ARTDataset, ARTFieldInfo
-
-#from yt.frontends.pluto.api import \
-#     PlutoDataset, PlutoFieldInfo, add_pluto_field
-
-from yt.frontends.stream.api import \
-    StreamDataset, \
-    StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh, load_octree
-
-from yt.frontends.sph.api import \
-    OWLSDataset, SPHFieldInfo, \
-    GadgetDataset, GadgetHDF5Dataset, \
-    TipsyDataset
-
-# For backwards compatibility
-GadgetStaticOutput = deprecated_class(GadgetDataset)
-TipsyStaticOutput = deprecated_class(TipsyDataset)
-
-#from yt.analysis_modules.list_modules import \
-#    get_available_modules, amods
-#available_analysis_modules = get_available_modules()
-
-from yt.frontends.fits.api import \
-    FITSDataset, FITSFieldInfo
-
-# Import our analysis modules
-from yt.analysis_modules.halo_finding.api import \
-    HaloFinder
-
-from yt.utilities.definitions import \
-    axis_names, x_dict, y_dict, inv_axis_names
-
-# Now individual component imports from the visualization API
-from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    callback_registry, write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, \
-    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
-    ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
-
-from yt.visualization.volume_rendering.api import \
-    ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    Camera, off_axis_projection, MosaicFisheyeCamera
-
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects
-
-for name, cls in callback_registry.items():
-    exec("%s = cls" % name)
-
-from yt.convenience import \
-    load, projload, simulation
-
-# Import some helpful math utilities
-from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position
-
-
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
 # that get used every time that they don't have to *define* every time.
@@ -184,12 +47,4 @@
 # Unfortunately, for now, I think the easiest and simplest way of doing
 # this is also the most dangerous way.
 if ytcfg.getboolean("yt","loadfieldplugins"):
-    my_plugin_name = ytcfg.get("yt","pluginfilename")
-    # We assume that it is with respect to the $HOME/.yt directory
-    if os.path.isfile(my_plugin_name):
-        _fn = my_plugin_name
-    else:
-        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
-    if os.path.isfile(_fn):
-        mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+    enable_plugins()

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -24,29 +24,18 @@
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
 def turn_on_parallelism():
+    parallel_capable = False
     try:
         from mpi4py import MPI
     except ImportError as e:
         mylog.error("Warning: Attempting to turn on parallelism, " +
                     "but mpi4py import failed. Try pip install mpi4py.")
         raise e
-    parallel_capable = (MPI.COMM_WORLD.size > 1)
-    if parallel_capable:
-        mylog.info("Global parallel computation enabled: %s / %s",
-                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel"] = "True"
-        if exe_name == "embed_enzo" or \
-            ("_parallel" in dir(sys) and sys._parallel == True):
-            ytcfg["yt","inline"] = "True"
-        # I believe we do not need to turn this off manually
-        #ytcfg["yt","StoreParameterFiles"] = "False"
-        # Now let's make sure we have the right options set.
-        if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","LogFile"):
-                ytcfg["yt","LogFile"] = "False"
-                yt.utilities.logger.disable_file_logging()
+        # Now we have to turn on the parallelism from the perspective of the
+        # parallel_analysis_interface
+    from yt.utilities.parallel_tools.parallel_analysis_interface import \
+        enable_parallelism
+    parallel_capable = enable_parallelism()
     return parallel_capable
 
 # This fallback is for Paraview:

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -624,3 +624,25 @@
         return _func
     return compare_results(func)
 
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys, yt
+    from yt.funcs import mylog
+    orig_level = mylog.getEffectiveLevel()
+    mylog.setLevel(50)
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(yt.__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        mylog.setLevel(orig_level)

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -2,8 +2,6 @@
 import setuptools
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
-from yt.utilities.setup import \
-    check_for_dependencies
 
 def check_for_openmp():
     # Create a temporary directory

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -33,9 +33,39 @@
 
 parallel_capable = ytcfg.getboolean("yt", "__parallel")
 
+dtype_names = dict(
+        float32 = "MPI.FLOAT",
+        float64 = "MPI.DOUBLE",
+        int32   = "MPI.INT",
+        int64   = "MPI.LONG",
+        c       = "MPI.CHAR",
+)
+op_names = dict(
+        sum = "MPI.SUM",
+        min = "MPI.MIN",
+        max = "MPI.MAX"
+)
+
 # Set up translation table and import things
-if parallel_capable:
+
+exe_name = os.path.basename(sys.executable)
+def enable_parallelism():
+    global parallel_capable
     from mpi4py import MPI
+    parallel_capable = (MPI.COMM_WORLD.size > 1)
+    if not parallel_capable: return False
+    mylog.info("Global parallel computation enabled: %s / %s",
+               MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+    ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+    ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
+    ytcfg["yt","__parallel"] = "True"
+    if exe_name == "embed_enzo" or \
+        ("_parallel" in dir(sys) and sys._parallel == True):
+        ytcfg["yt","inline"] = "True"
+    if MPI.COMM_WORLD.rank > 0:
+        if ytcfg.getboolean("yt","LogFile"):
+            ytcfg["yt","LogFile"] = "False"
+            yt.utilities.logger.disable_file_logging()
     yt.utilities.logger.uncolorize_logging()
     # Even though the uncolorize function already resets the format string,
     # we reset it again so that it includes the processor.
@@ -48,32 +78,19 @@
     if ytcfg.getint("yt","LogLevel") < 20:
         yt.utilities.logger.ytLogger.warning(
           "Log Level is set low -- this could affect parallel performance!")
-    dtype_names = dict(
+    dtype_names.update(dict(
             float32 = MPI.FLOAT,
             float64 = MPI.DOUBLE,
             int32   = MPI.INT,
             int64   = MPI.LONG,
             c       = MPI.CHAR,
-    )
-    op_names = dict(
+    ))
+    op_names.update(dict(
         sum = MPI.SUM,
         min = MPI.MIN,
         max = MPI.MAX
-    )
-
-else:
-    dtype_names = dict(
-            float32 = "MPI.FLOAT",
-            float64 = "MPI.DOUBLE",
-            int32   = "MPI.INT",
-            int64   = "MPI.LONG",
-            c       = "MPI.CHAR",
-    )
-    op_names = dict(
-            sum = "MPI.SUM",
-            min = "MPI.MIN",
-            max = "MPI.MAX"
-    )
+    ))
+    return True
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.
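
The shape of this refactor is worth noting: the translation tables
start as inert string placeholders at module scope, and
enable_parallelism() swaps in real MPI handles only when it runs, so
importing yt serially never touches mpi4py.  A minimal sketch of the
pattern:

    # module scope: strings only, no mpi4py import required
    dtype_names = dict(float64="MPI.DOUBLE")

    def enable_parallelism():
        from mpi4py import MPI                        # deferred import
        dtype_names.update(dict(float64=MPI.DOUBLE))  # replace in place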

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -11,6 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import numpy as np
+from itertools import izip
 
 import matplotlib
 import matplotlib.colors as cc
@@ -82,9 +83,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
-         'green':zip(_vs,_kamae_grn,_kamae_grn),
-         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':izip(_vs,_kamae_red,_kamae_red),
+         'green':izip(_vs,_kamae_grn,_kamae_grn),
+         'blue':izip(_vs,_kamae_blu,_kamae_blu)}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -147,9 +148,9 @@
 _vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': zip(_vs,v[0],v[0]),
-                  'green': zip(_vs,v[1],v[1]),
-                  'blue': zip(_vs,v[2],v[2]) }
+        cdict = { 'red': izip(_vs,v[0],v[0]),
+                  'green': izip(_vs,v[1],v[1]),
+                  'blue': izip(_vs,v[2],v[2]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):

diff -r bc7abe74aa1e1bc01d6cbf0697733fd70ffe9cdd -r dd346818141e863da09333691c2ba0bff9b52b85 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -20,9 +20,9 @@
 from yt.testing import \
     fake_random_pf
 import numpy as np
-from yt.mods import ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.volume_rendering.api import \
-    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera
+    PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
+    ColorTransferFunction, ProjectionTransferFunction
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase

Repository URL: https://bitbucket.org/yt_analysis/yt/
