[yt-svn] commit/yt: 15 new changesets
Bitbucket
commits-noreply at bitbucket.org
Tue Dec 20 14:19:04 PST 2011
15 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/64ee72664bb2/
changeset: 64ee72664bb2
branch: yt
user: MatthewTurk
date: 2011-11-23 20:27:20
summary: Beginning the consolidation of the --whatever argument parsing and moving a few
important items into a startup_items.py file. Command line utilities sort of
work but will not recognize or receive commands like --parallel.
affected #: 6 files
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -42,6 +42,7 @@
__global_parallel_size = '1',
__topcomm_parallel_rank = '0',
__topcomm_parallel_size = '1',
+ __command_line = 'False',
storeparameterfiles = 'False',
parameterfilestore = 'parameter_files.csv',
maximumstoredpfs = '500',
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -31,35 +31,8 @@
from yt.utilities.logger import ytLogger as mylog
import yt.utilities.progressbar as pb
import yt.utilities.rpdb as rpdb
-
-# Some compatibility functions. In the long run, these *should* disappear as
-# we move toward newer python versions. Most were implemented to get things
-# running on DataStar.
-
-# If we're running on python2.4, we need a 'wraps' function
-def blank_wrapper(f):
- return lambda a: a
-
-try:
- from functools import wraps
-except ImportError:
- wraps = blank_wrapper
-
-# We need to ensure that we have a defaultdict implementation
-
-class __defaultdict(dict):
- def __init__(self, func):
- self.__func = func
- dict.__init__(self)
- def __getitem__(self, key):
- if not self.has_key(key):
- self.__setitem__(key, self.__func())
- return dict.__getitem__(self, key)
-
-try:
- from collections import defaultdict
-except ImportError:
- defaultdict = __defaultdict
+from collections import defaultdict
+from functools import wraps
# Some functions for handling sequences and other types
@@ -78,7 +51,7 @@
string to a list, for instance ensuring the *fields* as an argument is a
list.
"""
- if obj == None:
+ if obj is None:
return [obj]
if not isinstance(obj, types.ListType):
return [obj]
@@ -385,18 +358,6 @@
def signal_ipython(signo, frame):
insert_ipython(2)
-# We use two signals, SIGUSR1 and SIGUSR2. In a non-threaded environment,
-# we set up handlers to process these by printing the current stack and to
-# raise a RuntimeError. The latter can be used, inside pdb, to catch an error
-# and then examine the current stack.
-try:
- signal.signal(signal.SIGUSR1, signal_print_traceback)
- mylog.debug("SIGUSR1 registered for traceback printing")
- signal.signal(signal.SIGUSR2, signal_ipython)
- mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError: # Not in main thread
- pass
-
def paste_traceback(exc_type, exc, tb):
"""
This is a traceback handler that knows how to paste to the pastebin.
@@ -450,29 +411,6 @@
dec_s = ''.join([ chr(ord(a) ^ ord(b)) for a, b in zip(enc_s, itertools.cycle(key)) ])
print dec_s
-# If we recognize one of the arguments on the command line as indicating a
-# different mechanism for handling tracebacks, we attach one of those handlers
-# and remove the argument from sys.argv.
-#
-# This fallback is for Paraview:
-if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
-# Now, we check.
-if "--paste" in sys.argv:
- sys.excepthook = paste_traceback
- del sys.argv[sys.argv.index("--paste")]
-elif "--paste-detailed" in sys.argv:
- sys.excepthook = paste_traceback_detailed
- del sys.argv[sys.argv.index("--paste-detailed")]
-elif "--detailed" in sys.argv:
- import cgitb; cgitb.enable(format="text")
- del sys.argv[sys.argv.index("--detailed")]
-elif "--rpdb" in sys.argv:
- sys.excepthook = rpdb.rpdb_excepthook
- del sys.argv[sys.argv.index("--rpdb")]
-elif "--detailed" in sys.argv:
- import cgitb; cgitb.enable(format="text")
- del sys.argv[sys.argv.index("--detailed")]
-
#
# Some exceptions
#
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -35,6 +35,12 @@
import numpy as na # For historical reasons
import numpy # In case anyone wishes to use it by name
+# This next item will handle most of the actual startup procedures, but it will
+# also attempt to parse the command line and set up the global state of various
+# operations.
+
+import yt.startup_tasks as __startup_tasks
+
from yt.funcs import *
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.performance_counters import yt_counters, time_function
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/startup_tasks.py
--- /dev/null
+++ b/yt/startup_tasks.py
@@ -0,0 +1,120 @@
+"""
+Very simple convenience function for importing all the modules, setting up
+the namespace and getting the last argument on the command line.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2011 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# This handles the command line.
+
+import argparse, os, sys
+
+from yt.config import ytcfg
+from yt.funcs import *
+
+exe_name = os.path.basename(sys.executable)
+# At import time, we determined whether or not we're being run in parallel.
+if exe_name in \
+ ["mpi4py", "embed_enzo",
+ "python"+sys.version[:3]+"-mpi"] \
+ or "--parallel" in sys.argv or '_parallel' in dir(sys) \
+ or any(["ipengine" in arg for arg in sys.argv]):
+ try:
+ from mpi4py import MPI
+ parallel_capable = (MPI.COMM_WORLD.size > 1)
+ except ImportError:
+ parallel_capable = False
+ if parallel_capable:
+ mylog.info("Global parallel computation enabled: %s / %s",
+ MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+ ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+ ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
+ ytcfg["yt","__parallel"] = "True"
+ if exe_name == "embed_enzo" or \
+ ("_parallel" in dir(sys) and sys._parallel == True):
+ ytcfg["yt","inline"] = "True"
+ # I believe we do not need to turn this off manually
+ #ytcfg["yt","StoreParameterFiles"] = "False"
+ # Now let's make sure we have the right options set.
+ if MPI.COMM_WORLD.rank > 0:
+ if ytcfg.getboolean("yt","LogFile"):
+ ytcfg["yt","LogFile"] = "False"
+ yt.utilities.logger.disable_file_logging()
+else:
+ parallel_capable = False
+
+# This fallback is for Paraview:
+
+# We use two signals, SIGUSR1 and SIGUSR2. In a non-threaded environment,
+# we set up handlers to process these by printing the current stack and to
+# raise a RuntimeError. The latter can be used, inside pdb, to catch an error
+# and then examine the current stack.
+try:
+ signal.signal(signal.SIGUSR1, signal_print_traceback)
+ mylog.debug("SIGUSR1 registered for traceback printing")
+ signal.signal(signal.SIGUSR2, signal_ipython)
+ mylog.debug("SIGUSR2 registered for IPython Insertion")
+except ValueError: # Not in main thread
+ pass
+
+class SetExceptionHandling(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string = None):
+ # If we recognize one of the arguments on the command line as indicating a
+ # different mechanism for handling tracebacks, we attach one of those handlers
+ # and remove the argument from sys.argv.
+ #
+ if self.dest == "paste":
+ sys.excepthook = paste_traceback
+ mylog.debug("Enabling traceback pasting")
+ elif self.dest == "paste-detailed":
+ sys.excepthook = paste_traceback_detailed
+ mylog.debug("Enabling detailed traceback pasting")
+ elif self.dest == "detailed":
+ import cgitb; cgitb.enable(format="text")
+ mylog.debug("Enabling detailed traceback reporting")
+ elif self.dest == "rpdb":
+ sys.excepthook = rpdb.rpdb_excepthook
+ mylog.debug("Enabling remote debugging")
+
+class SetConfigOption(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string = None):
+ param, val = values.split("=")
+ mylog.debug("Overriding config: %s = %s", param, val)
+ ytcfg["yt",param] = val
+
+parser = argparse.ArgumentParser(description = 'yt command line arguments')
+parser.add_argument("--config", action=SetConfigOption,
+ help = "Set configuration option, in the form param=value")
+parser.add_argument("--paste", action=SetExceptionHandling,
+ help = "Paste traceback to paste.yt-project.org", nargs = 0)
+parser.add_argument("--paste-detailed", action=SetExceptionHandling,
+ help = "Paste a detailed traceback with local variables to " +
+ "paste.yt-project.org", nargs = 0)
+parser.add_argument("--detailed", action=SetExceptionHandling,
+ help = "Display detailed traceback.", nargs = 0)
+parser.add_argument("--rpdb", action=SetExceptionHandling,
+ help = "Enable remote pdb interaction (for parallel debugging).", nargs = 0)
+
+if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
+
+if not ytcfg.getboolean("yt","__command_line"):
+ parser.parse_args()
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -23,6 +23,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+from yt.config import ytcfg
+ytcfg["yt","__command_line"] = "True"
from yt.mods import *
from yt.funcs import *
import cmdln as cmdln
diff -r 4ec73cabefb6bbf87f117ec671ecc21fa9eb7233 -r 64ee72664bb20977421c497cfd18e8cc446159af yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -39,49 +39,23 @@
from yt.utilities.amr_utils import \
QuadTree, merge_quadtrees
-exe_name = os.path.basename(sys.executable)
-# At import time, we determined whether or not we're being run in parallel.
-if exe_name in \
- ["mpi4py", "embed_enzo",
- "python"+sys.version[:3]+"-mpi"] \
- or "--parallel" in sys.argv or '_parallel' in dir(sys) \
- or any(["ipengine" in arg for arg in sys.argv]):
+parallel_capable = ytcfg.getboolean("yt", "__parallel")
+
+# Set up translation table and import things
+if parallel_capable:
from mpi4py import MPI
- parallel_capable = (MPI.COMM_WORLD.size > 1)
- if parallel_capable:
- mylog.info("Global parallel computation enabled: %s / %s",
- MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
- ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
- ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
- ytcfg["yt","__parallel"] = "True"
- if exe_name == "embed_enzo" or \
- ("_parallel" in dir(sys) and sys._parallel == True):
- ytcfg["yt","inline"] = "True"
- # I believe we do not need to turn this off manually
- #ytcfg["yt","StoreParameterFiles"] = "False"
- # Now let's make sure we have the right options set.
- if MPI.COMM_WORLD.rank > 0:
- if ytcfg.getboolean("yt","LogFile"):
- ytcfg["yt","LogFile"] = "False"
- yt.utilities.logger.disable_file_logging()
- yt.utilities.logger.uncolorize_logging()
- # Even though the uncolorize function already resets the format string,
- # we reset it again so that it includes the processor.
- f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
- yt.utilities.logger.ufstring))
- if len(yt.utilities.logger.rootLogger.handlers) > 0:
- yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
- if ytcfg.getboolean("yt", "parallel_traceback"):
- sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
+ yt.utilities.logger.uncolorize_logging()
+ # Even though the uncolorize function already resets the format string,
+ # we reset it again so that it includes the processor.
+ f = logging.Formatter("P%03i %s" % (MPI.COMM_WORLD.rank,
+ yt.utilities.logger.ufstring))
+ if len(yt.utilities.logger.rootLogger.handlers) > 0:
+ yt.utilities.logger.rootLogger.handlers[0].setFormatter(f)
+ if ytcfg.getboolean("yt", "parallel_traceback"):
+ sys.excepthook = traceback_writer_hook("_%03i" % MPI.COMM_WORLD.rank)
if ytcfg.getint("yt","LogLevel") < 20:
yt.utilities.logger.ytLogger.warning(
"Log Level is set low -- this could affect parallel performance!")
-
-else:
- parallel_capable = False
-
-# Set up translation table
-if parallel_capable:
dtype_names = dict(
float32 = MPI.FLOAT,
float64 = MPI.DOUBLE,
https://bitbucket.org/yt_analysis/yt/changeset/7ffc6f78a470/
changeset: 7ffc6f78a470
branch: yt
user: MatthewTurk
date: 2011-11-23 22:22:48
summary: Major refactoring of the command-line parser to use argparse instead of cmdln.
* Everything is now a subclass of YTCommand
* Adding new commands is clearer and easier
* Uses argparse, a python 2.7 library
* We can use --config something=another with it
* Removed all of the pasteboard
* Moved a bunch of the yt source handling into yt/funcs.py
* Removed pexpect.py
affected #: 4 files
diff -r 64ee72664bb20977421c497cfd18e8cc446159af -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -420,3 +420,103 @@
class YTEmptyClass(object):
pass
+
+def _update_hg(path, skip_rebuild = False):
+ from mercurial import hg, ui, commands
+ f = open(os.path.join(path, "yt_updater.log"), "a")
+ u = ui.ui()
+ u.pushbuffer()
+ config_fn = os.path.join(path, ".hg", "hgrc")
+ print "Reading configuration from ", config_fn
+ u.readconfig(config_fn)
+ repo = hg.repository(u, path)
+ commands.pull(u, repo)
+ f.write(u.popbuffer())
+ f.write("\n\n")
+ u.pushbuffer()
+ commands.identify(u, repo)
+ if "+" in u.popbuffer():
+ print "Can't rebuild modules by myself."
+ print "You will have to do this yourself. Here's a sample commands:"
+ print
+ print " $ cd %s" % (path)
+ print " $ hg up"
+ print " $ %s setup.py develop" % (sys.executable)
+ return 1
+ print "Updating the repository"
+ f.write("Updating the repository\n\n")
+ commands.update(u, repo, check=True)
+ if skip_rebuild: return
+ f.write("Rebuilding modules\n\n")
+ p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
+ stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ stdout, stderr = p.communicate()
+ f.write(stdout)
+ f.write("\n\n")
+ if p.returncode:
+ print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
+ sys.exit(1)
+ f.write("Successful!\n")
+ print "Updated successfully."
+
+def _get_hg_version(path):
+ from mercurial import hg, ui, commands
+ u = ui.ui()
+ u.pushbuffer()
+ repo = hg.repository(u, path)
+ commands.identify(u, repo)
+ return u.popbuffer()
+
+def _get_yt_version():
+ import pkg_resources
+ yt_provider = pkg_resources.get_provider("yt")
+ path = os.path.dirname(yt_provider.module_path)
+ version = _get_hg_version(path)[:12]
+ return version
+
+# This code snippet is modified from Georg Brandl
+def _bb_apicall(endpoint, data, use_pass = True):
+ import urllib, urllib2
+ uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
+ # since bitbucket doesn't return the required WWW-Authenticate header when
+ # making a request without Authorization, we cannot use the standard urllib2
+ # auth handlers; we have to add the requisite header from the start
+ if data is not None:
+ data = urllib.urlencode(data)
+ req = urllib2.Request(uri, data)
+ if use_pass:
+ username = raw_input("Bitbucket Username? ")
+ password = getpass.getpass()
+ upw = '%s:%s' % (username, password)
+ req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
+ return urllib2.urlopen(req).read()
+
+def _get_yt_supp():
+ supp_path = os.path.join(os.environ["YT_DEST"], "src",
+ "yt-supplemental")
+ # Now we check that the supplemental repository is checked out.
+ if not os.path.isdir(supp_path):
+ print
+ print "*** The yt-supplemental repository is not checked ***"
+ print "*** out. I can do this for you, but because this ***"
+ print "*** is a delicate act, I require you to respond ***"
+ print "*** to the prompt with the word 'yes'. ***"
+ print
+ response = raw_input("Do you want me to try to check it out? ")
+ if response != "yes":
+ print
+ print "Okay, I understand. You can check it out yourself."
+ print "This command will do it:"
+ print
+ print "$ hg clone http://hg.yt-project.org/yt-supplemental/ ",
+ print "%s" % (supp_path)
+ print
+ sys.exit(1)
+ rv = commands.clone(uu,
+ "http://hg.yt-project.org/yt-supplemental/", supp_path)
+ if rv:
+ print "Something has gone wrong. Quitting."
+ sys.exit(1)
+ # Now we think we have our supplemental repository.
+ return supp_path
+
diff -r 64ee72664bb20977421c497cfd18e8cc446159af -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -25,10 +25,10 @@
from yt.config import ytcfg
ytcfg["yt","__command_line"] = "True"
+from yt.startup_tasks import parser
from yt.mods import *
from yt.funcs import *
-import cmdln as cmdln
-import optparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
+import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
import urllib, urllib2, base64
def _fix_pf(arg):
@@ -42,13 +42,71 @@
pf = load(arg[:-10])
else:
pf = load(arg)
- if pf is None:
- raise IOError
return pf
+subparsers = parser.add_subparsers(title="subcommands",
+ description="Valid subcommands",)
+
+def _add_arg(sc, arg):
+ if isinstance(arg, types.StringTypes):
+ arg = _common_options[arg].copy()
+ argnames = []
+ if "short" in arg: argnames.append(arg.pop('short'))
+ if "long" in arg: argnames.append(arg.pop('long'))
+ sc.add_argument(*argnames, **arg)
+
+class YTCommand(object):
+ args = ()
+ name = None
+ description = ""
+ aliases = ()
+ npfs = 1
+
+ class __metaclass__(type):
+ def __init__(cls, name, b, d):
+ type.__init__(cls, name, b, d)
+ if cls.name is not None:
+ print "Adding", cls.name
+ sc = subparsers.add_parser(cls.name,
+ description = cls.description)
+ sc.set_defaults(func=cls.run)
+ for arg in cls.args:
+ _add_arg(sc, arg)
+
+ @classmethod
+ def run(cls, args):
+ self = cls()
+ # Some commands need to be run repeatedly on parameter files
+ # In fact, this is the rule and the opposite is the exception
+ # BUT, we only want to parse the arguments once.
+ if cls.npfs > 1:
+ self(args)
+ else:
+ if len(getattr(args, "pf", [])) > 1:
+ pfs = args.pf
+ for pf in pfs:
+ args.pf = pf
+ self(args)
+ else:
+ args.pf = args.pf[0]
+ self(args)
+
+class GetParameterFiles(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string = None):
+ if len(values) == 1:
+ pfs = values
+ elif len(values) == 2 and namespace.basename is not None:
+ pfs = ["%s%04i" % (opts.basename, r)
+ for r in range(int(values[0]), int(values[1]), opts.skip) ]
+ else:
+ pfs = values
+ namespace.pf = [_fix_pf(pf) for pf in pfs]
+
_common_options = dict(
+ pf = dict(short="pf", action=GetParameterFiles,
+ nargs="+", help="Parameter files to run on"),
axis = dict(short="-a", long="--axis",
- action="store", type="int",
+ action="store", type=int,
dest="axis", default=4,
help="Axis (4 for all three)"),
log = dict(short="-l", long="--log",
@@ -56,320 +114,183 @@
dest="takelog", default=True,
help="Take the log of the field?"),
text = dict(short="-t", long="--text",
- action="store", type="string",
+ action="store", type=str,
dest="text", default=None,
help="Textual annotation"),
field = dict(short="-f", long="--field",
- action="store", type="string",
+ action="store", type=str,
dest="field", default="Density",
help="Field to color by"),
weight = dict(short="-g", long="--weight",
- action="store", type="string",
+ action="store", type=str,
dest="weight", default=None,
help="Field to weight projections with"),
- cmap = dict(short="", long="--colormap",
- action="store", type="string",
+ cmap = dict(long="--colormap",
+ action="store", type=str,
dest="cmap", default="jet",
help="Colormap name"),
zlim = dict(short="-z", long="--zlim",
- action="store", type="float",
+ action="store", type=float,
dest="zlim", default=None,
nargs=2,
help="Color limits (min, max)"),
- dex = dict(short="", long="--dex",
- action="store", type="float",
+ dex = dict(long="--dex",
+ action="store", type=float,
dest="dex", default=None,
nargs=1,
help="Number of dex above min to display"),
width = dict(short="-w", long="--width",
- action="store", type="float",
+ action="store", type=float,
dest="width", default=1.0,
help="Width in specified units"),
unit = dict(short="-u", long="--unit",
- action="store", type="string",
+ action="store", type=str,
dest="unit", default='unitary',
help="Desired units"),
center = dict(short="-c", long="--center",
- action="store", type="float",
+ action="store", type=float,
dest="center", default=None,
nargs=3,
help="Center, space separated (-1 -1 -1 for max)"),
bn = dict(short="-b", long="--basename",
- action="store", type="string",
+ action="store", type=str,
dest="basename", default=None,
help="Basename of parameter files"),
output = dict(short="-o", long="--output",
- action="store", type="string",
+ action="store", type=str,
dest="output", default="frames/",
help="Folder in which to place output images"),
outputfn= dict(short="-o", long="--output",
- action="store", type="string",
+ action="store", type=str,
dest="output", default=None,
help="File in which to place output"),
skip = dict(short="-s", long="--skip",
- action="store", type="int",
+ action="store", type=int,
dest="skip", default=1,
help="Skip factor for outputs"),
proj = dict(short="-p", long="--projection",
action="store_true",
dest="projection", default=False,
help="Use a projection rather than a slice"),
- maxw = dict(short="", long="--max-width",
- action="store", type="float",
+ maxw = dict(long="--max-width",
+ action="store", type=float,
dest="max_width", default=1.0,
help="Maximum width in code units"),
- minw = dict(short="", long="--min-width",
- action="store", type="float",
+ minw = dict(long="--min-width",
+ action="store", type=float,
dest="min_width", default=50,
help="Minimum width in units of smallest dx (default: 50)"),
nframes = dict(short="-n", long="--nframes",
- action="store", type="int",
+ action="store", type=int,
dest="nframes", default=100,
help="Number of frames to generate"),
- slabw = dict(short="", long="--slab-width",
- action="store", type="float",
+ slabw = dict(long="--slab-width",
+ action="store", type=float,
dest="slab_width", default=1.0,
help="Slab width in specified units"),
slabu = dict(short="-g", long="--slab-unit",
- action="store", type="string",
+ action="store", type=str,
dest="slab_unit", default='1',
help="Desired units for the slab"),
- ptype = dict(short="", long="--particle-type",
- action="store", type="int",
+ ptype = dict(long="--particle-type",
+ action="store", type=int,
dest="ptype", default=2,
help="Particle type to select"),
- agecut = dict(short="", long="--age-cut",
- action="store", type="float",
+ agecut = dict(long="--age-cut",
+ action="store", type=float,
dest="age_filter", default=None,
nargs=2,
help="Bounds for the field to select"),
- uboxes = dict(short="", long="--unit-boxes",
+ uboxes = dict(long="--unit-boxes",
action="store_true",
dest="unit_boxes",
help="Display helpful unit boxes"),
- thresh = dict(short="", long="--threshold",
- action="store", type="float",
+ thresh = dict(long="--threshold",
+ action="store", type=float,
dest="threshold", default=None,
help="Density threshold"),
- dm_only = dict(short="", long="--all-particles",
+ dm_only = dict(long="--all-particles",
action="store_false",
dest="dm_only", default=True,
help="Use all particles"),
- grids = dict(short="", long="--show-grids",
+ grids = dict(long="--show-grids",
action="store_true",
dest="grids", default=False,
help="Show the grid boundaries"),
- time = dict(short="", long="--time",
+ time = dict(long="--time",
action="store_true",
dest="time", default=False,
help="Print time in years on image"),
- contours = dict(short="", long="--contours",
- action="store",type="int",
+ contours = dict(long="--contours",
+ action="store",type=int,
dest="contours", default=None,
help="Number of Contours for Rendering"),
- contour_width = dict(short="", long="--contour_width",
- action="store",type="float",
+ contour_width = dict(long="--contour_width",
+ action="store",type=float,
dest="contour_width", default=None,
help="Width of gaussians used for rendering."),
- enhance = dict(short="", long="--enhance",
+ enhance = dict(long="--enhance",
action="store_true",
dest="enhance", default=False,
help="Enhance!"),
valrange = dict(short="-r", long="--range",
- action="store", type="float",
+ action="store", type=float,
dest="valrange", default=None,
nargs=2,
help="Range, space separated"),
- up = dict(short="", long="--up",
- action="store", type="float",
+ up = dict(long="--up",
+ action="store", type=float,
dest="up", default=None,
nargs=3,
help="Up, space separated"),
- viewpoint = dict(short="", long="--viewpoint",
- action="store", type="float",
+ viewpoint = dict(long="--viewpoint",
+ action="store", type=float,
dest="viewpoint", default=[1., 1., 1.],
nargs=3,
help="Viewpoint, space separated"),
- pixels = dict(short="", long="--pixels",
- action="store",type="int",
+ pixels = dict(long="--pixels",
+ action="store",type=int,
dest="pixels", default=None,
help="Number of Pixels for Rendering"),
- halos = dict(short="", long="--halos",
- action="store", type="string",
+ halos = dict(long="--halos",
+ action="store", type=str,
dest="halos",default="multiple",
help="Run halo profiler on a 'single' halo or 'multiple' halos."),
- halo_radius = dict(short="", long="--halo_radius",
- action="store", type="float",
+ halo_radius = dict(long="--halo_radius",
+ action="store", type=float,
dest="halo_radius",default=0.1,
help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
- halo_radius_units = dict(short="", long="--halo_radius_units",
- action="store", type="string",
+ halo_radius_units = dict(long="--halo_radius_units",
+ action="store", type=str,
dest="halo_radius_units",default="1",
help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
- halo_hop_style = dict(short="", long="--halo_hop_style",
- action="store", type="string",
+ halo_hop_style = dict(long="--halo_hop_style",
+ action="store", type=str,
dest="halo_hop_style",default="new",
help="Style of hop output file. 'new' for yt_hop files and 'old' for enzo_hop files."),
- halo_parameter_file = dict(short="", long="--halo_parameter_file",
- action="store", type="string",
+ halo_parameter_file = dict(long="--halo_parameter_file",
+ action="store", type=str,
dest="halo_parameter_file",default=None,
help="HaloProfiler parameter file."),
- make_profiles = dict(short="", long="--make_profiles",
+ make_profiles = dict(long="--make_profiles",
action="store_true", default=False,
help="Make profiles with halo profiler."),
- make_projections = dict(short="", long="--make_projections",
+ make_projections = dict(long="--make_projections",
action="store_true", default=False,
help="Make projections with halo profiler.")
)
-def _add_options(parser, *options):
- for opt in options:
- oo = _common_options[opt].copy()
- parser.add_option(oo.pop("short"), oo.pop("long"), **oo)
-def _get_parser(*options):
- parser = optparse.OptionParser()
- _add_options(parser, *options)
- return parser
-
-def add_cmd_options(options):
- opts = []
- for option in options:
- vals = _common_options[option].copy()
- opts.append(([vals.pop("short"), vals.pop("long")],
- vals))
- def apply_options(func):
- for args, kwargs in opts:
- func = cmdln.option(*args, **kwargs)(func)
- return func
- return apply_options
-
-def check_args(func):
- @wraps(func)
- def arg_iterate(self, subcmd, opts, *args):
- if len(args) == 1:
- pfs = args
- elif len(args) == 2 and opts.basename is not None:
- pfs = ["%s%04i" % (opts.basename, r)
- for r in range(int(args[0]), int(args[1]), opts.skip) ]
- else: pfs = args
- for arg in pfs:
- func(self, subcmd, opts, arg)
- return arg_iterate
-
-def _update_hg(path, skip_rebuild = False):
- from mercurial import hg, ui, commands
- f = open(os.path.join(path, "yt_updater.log"), "a")
- u = ui.ui()
- u.pushbuffer()
- config_fn = os.path.join(path, ".hg", "hgrc")
- print "Reading configuration from ", config_fn
- u.readconfig(config_fn)
- repo = hg.repository(u, path)
- commands.pull(u, repo)
- f.write(u.popbuffer())
- f.write("\n\n")
- u.pushbuffer()
- commands.identify(u, repo)
- if "+" in u.popbuffer():
- print "Can't rebuild modules by myself."
- print "You will have to do this yourself. Here's a sample commands:"
- print
- print " $ cd %s" % (path)
- print " $ hg up"
- print " $ %s setup.py develop" % (sys.executable)
- return 1
- print "Updating the repository"
- f.write("Updating the repository\n\n")
- commands.update(u, repo, check=True)
- if skip_rebuild: return
- f.write("Rebuilding modules\n\n")
- p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
- stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
- stdout, stderr = p.communicate()
- f.write(stdout)
- f.write("\n\n")
- if p.returncode:
- print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
- sys.exit(1)
- f.write("Successful!\n")
- print "Updated successfully."
-
-def _get_hg_version(path):
- from mercurial import hg, ui, commands
- u = ui.ui()
- u.pushbuffer()
- repo = hg.repository(u, path)
- commands.identify(u, repo)
- return u.popbuffer()
-
-def get_yt_version():
- import pkg_resources
- yt_provider = pkg_resources.get_provider("yt")
- path = os.path.dirname(yt_provider.module_path)
- version = _get_hg_version(path)[:12]
- return version
-
-# This code snippet is modified from Georg Brandl
-def bb_apicall(endpoint, data, use_pass = True):
- uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
- # since bitbucket doesn't return the required WWW-Authenticate header when
- # making a request without Authorization, we cannot use the standard urllib2
- # auth handlers; we have to add the requisite header from the start
- if data is not None:
- data = urllib.urlencode(data)
- req = urllib2.Request(uri, data)
- if use_pass:
- username = raw_input("Bitbucket Username? ")
- password = getpass.getpass()
- upw = '%s:%s' % (username, password)
- req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
- return urllib2.urlopen(req).read()
-
-def _get_yt_supp():
- supp_path = os.path.join(os.environ["YT_DEST"], "src",
- "yt-supplemental")
- # Now we check that the supplemental repository is checked out.
- if not os.path.isdir(supp_path):
- print
- print "*** The yt-supplemental repository is not checked ***"
- print "*** out. I can do this for you, but because this ***"
- print "*** is a delicate act, I require you to respond ***"
- print "*** to the prompt with the word 'yes'. ***"
- print
- response = raw_input("Do you want me to try to check it out? ")
- if response != "yes":
- print
- print "Okay, I understand. You can check it out yourself."
- print "This command will do it:"
- print
- print "$ hg clone http://hg.yt-project.org/yt-supplemental/ ",
- print "%s" % (supp_path)
- print
- sys.exit(1)
- rv = commands.clone(uu,
- "http://hg.yt-project.org/yt-supplemental/", supp_path)
- if rv:
- print "Something has gone wrong. Quitting."
- sys.exit(1)
- # Now we think we have our supplemental repository.
- return supp_path
-
-class YTCommands(cmdln.Cmdln):
- name="yt"
-
- def __init__(self, *args, **kwargs):
- cmdln.Cmdln.__init__(self, *args, **kwargs)
- cmdln.Cmdln.do_help.aliases.append("h")
-
- def do_update(self, subcmd, opts):
+class YTUpdateCmd(YTCommand):
+ name = "update"
+ description = \
"""
Update the yt installation to the most recent version
- ${cmd_usage}
- ${cmd_option_list}
"""
+
+ def __call__(self, opts):
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
@@ -405,19 +326,24 @@
print "updating to the newest changeset."
print
- @cmdln.option("-u", "--update-source", action="store_true",
- default = False,
- help="Update the yt installation, if able")
- @cmdln.option("-o", "--output-version", action="store",
+class YTInstInfoCmd(YTCommand):
+ name = "instinfo"
+ args = (
+ dict(short="-u", long="--update-source", action="store_true",
+ default = False,
+ help="Update the yt installation, if able"),
+ dict(short="-o", long="--output-version", action="store",
default = None, dest="outputfile",
- help="File into which the current revision number will be stored")
- def do_instinfo(self, subcmd, opts):
+ help="File into which the current revision number will be" +
+ "stored")
+ )
+ description = \
"""
Get some information about the yt installation
- ${cmd_usage}
- ${cmd_option_list}
"""
+
+ def __call__(self, opts):
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
@@ -456,15 +382,18 @@
if vstring is not None and opts.outputfile is not None:
open(opts.outputfile, "w").write(vstring)
- def do_load(self, subcmd, opts, arg):
+class YTLoadCmd(YTCommand):
+ name = "load"
+ description = \
"""
Load a single dataset into an IPython instance
- ${cmd_option_list}
"""
- try:
- pf = _fix_pf(arg)
- except IOError:
+
+ args = ("pf", )
+
+ def __call__(self, args):
+ if args.pf is None:
print "Could not load file."
sys.exit()
import yt.mods
@@ -492,159 +421,176 @@
from IPython.frontend.terminal.embed import InteractiveShellEmbed
ipshell = InteractiveShellEmbed(config=cfg)
- @add_cmd_options(['outputfn','bn','thresh','dm_only','skip'])
- @check_args
- def do_hop(self, subcmd, opts, arg):
+class YTHopCmd(YTCommand):
+ args = ('outputfn','bn','thresh','dm_only','skip', 'pf')
+ name = "hop"
+ description = \
"""
Run HOP on one or more datasets
- ${cmd_option_list}
"""
- pf = _fix_pf(arg)
- kwargs = {'dm_only' : opts.dm_only}
- if opts.threshold is not None: kwargs['threshold'] = opts.threshold
+
+ def __call__(self, args):
+ pf = args.pf
+ kwargs = {'dm_only' : args.dm_only}
+ if args.threshold is not None: kwargs['threshold'] = args.threshold
hop_list = HaloFinder(pf, **kwargs)
- if opts.output is None: fn = "%s.hop" % pf
- else: fn = opts.output
+ if args.output is None: fn = "%s.hop" % pf
+ else: fn = args.output
hop_list.write_out(fn)
- @add_cmd_options(['make_profiles','make_projections','halo_parameter_file',
- 'halos','halo_hop_style','halo_radius','halo_radius_units'])
- def do_halos(self, subcmd, opts, arg):
+class YTHalosCmd(YTCommand):
+ name = "halos"
+ args = ('make_profiles','make_projections','halo_parameter_file',
+ 'halos','halo_hop_style','halo_radius','halo_radius_units', 'pf')
+ description = \
"""
Run HaloProfiler on one dataset
- ${cmd_option_list}
"""
+ def __call__(self, args):
import yt.analysis_modules.halo_profiler.api as HP
- kwargs = {'halos': opts.halos,
- 'halo_radius': opts.halo_radius,
- 'radius_units': opts.halo_radius_units}
+ kwargs = {'halos': args.halos,
+ 'halo_radius': args.halo_radius,
+ 'radius_units': args.halo_radius_units}
- hp = HP.HaloProfiler(arg,opts.halo_parameter_file,**kwargs)
- if opts.make_profiles:
+ hp = HP.HaloProfiler(arg,args.halo_parameter_file,**kwargs)
+ if args.make_profiles:
hp.make_profiles()
- if opts.make_projections:
+ if args.make_projections:
hp.make_projections()
- @add_cmd_options(["width", "unit", "bn", "proj", "center",
- "zlim", "axis", "field", "weight", "skip",
- "cmap", "output", "grids", "time"])
- @check_args
- def do_plot(self, subcmd, opts, arg):
+class YTPlotCmd(YTCommand):
+ args = ("width", "unit", "bn", "proj", "center",
+ "zlim", "axis", "field", "weight", "skip",
+ "cmap", "output", "grids", "time", "pf")
+ name = "plot"
+
+ description = \
"""
Create a set of images
- ${cmd_usage}
- ${cmd_option_list}
"""
- pf = _fix_pf(arg)
- center = opts.center
- if opts.center == (-1,-1,-1):
+
+ def __call__(self, args):
+ pf = args.pf
+ center = args.center
+ if args.center == (-1,-1,-1):
mylog.info("No center fed in; seeking.")
v, center = pf.h.find_max("Density")
- elif opts.center is None:
+ elif args.center is None:
center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
center = na.array(center)
pc=PlotCollection(pf, center=center)
- if opts.axis == 4:
+ if args.axis == 4:
axes = range(3)
else:
- axes = [opts.axis]
+ axes = [args.axis]
for ax in axes:
mylog.info("Adding plot for axis %i", ax)
- if opts.projection: pc.add_projection(opts.field, ax,
- weight_field=opts.weight, center=center)
- else: pc.add_slice(opts.field, ax, center=center)
- if opts.grids: pc.plots[-1].modify["grids"]()
- if opts.time:
+ if args.projection: pc.add_projection(args.field, ax,
+ weight_field=args.weight, center=center)
+ else: pc.add_slice(args.field, ax, center=center)
+ if args.grids: pc.plots[-1].modify["grids"]()
+ if args.time:
time = pf.current_time*pf['Time']*pf['years']
pc.plots[-1].modify["text"]((0.2,0.8), 't = %5.2e yr'%time)
- pc.set_width(opts.width, opts.unit)
- pc.set_cmap(opts.cmap)
- if opts.zlim: pc.set_zlim(*opts.zlim)
- if not os.path.isdir(opts.output): os.makedirs(opts.output)
- pc.save(os.path.join(opts.output,"%s" % (pf)))
+ pc.set_width(args.width, args.unit)
+ pc.set_cmap(args.cmap)
+ if args.zlim: pc.set_zlim(*args.zlim)
+ if not os.path.isdir(args.output): os.makedirs(args.output)
+ pc.save(os.path.join(args.output,"%s" % (pf)))
- @add_cmd_options(["proj", "field", "weight"])
- @cmdln.option("-a", "--axis", action="store", type="int",
- dest="axis", default=0, help="Axis (4 for all three)")
- @cmdln.option("-o", "--host", action="store", type="string",
+class YTMapserverCmd(YTCommand):
+ args = ("proj", "field", "weight",
+ dict(short="-a", long="--axis", action="store", type=int,
+ dest="axis", default=0, help="Axis (4 for all three)"),
+ dict(short ="-o", long="--host", action="store", type=str,
dest="host", default=None, help="IP Address to bind on")
- @check_args
- def do_mapserver(self, subcmd, opts, arg):
+ )
+
+ name = "mapserver"
+ description = \
"""
Serve a plot in a GMaps-style interface
- ${cmd_usage}
- ${cmd_option_list}
"""
- pf = _fix_pf(arg)
+
+ def __call__(self, args):
+ pf = args.pf
pc=PlotCollection(pf, center=0.5*(pf.domain_left_edge +
pf.domain_right_edge))
- if opts.axis == 4:
+ if args.axis == 4:
print "Doesn't work with multiple axes!"
return
- if opts.projection:
- p = pc.add_projection(opts.field, opts.axis, weight_field=opts.weight)
+ if args.projection:
+ p = pc.add_projection(args.field, args.axis, weight_field=args.weight)
else:
- p = pc.add_slice(opts.field, opts.axis)
+ p = pc.add_slice(args.field, args.axis)
from yt.gui.reason.pannable_map import PannableMapServer
- mapper = PannableMapServer(p.data, opts.field)
+ mapper = PannableMapServer(p.data, args.field)
import yt.utilities.bottle as bottle
bottle.debug(True)
- if opts.host is not None:
- colonpl = opts.host.find(":")
+ if args.host is not None:
+ colonpl = args.host.find(":")
if colonpl >= 0:
- port = int(opts.host.split(":")[-1])
- opts.host = opts.host[:colonpl]
+ port = int(args.host.split(":")[-1])
+ args.host = args.host[:colonpl]
else:
port = 8080
- bottle.run(server='rocket', host=opts.host, port=port)
+ bottle.run(server='rocket', host=args.host, port=port)
else:
bottle.run(server='rocket')
- def do_rpdb(self, subcmd, opts, task):
+class YTRPDBCmd(YTCommand):
+ name = "rpdb"
+ description = \
"""
Connect to a currently running (on localhost) rpd session.
Commands run with --rpdb will trigger an rpdb session with any
uncaught exceptions.
- ${cmd_usage}
- ${cmd_option_list}
"""
+
+ def __call__(self, args):
import rpdb
rpdb.run_rpdb(int(task))
- @add_cmd_options(['outputfn','bn','skip'])
- @check_args
- def do_stats(self, subcmd, opts, arg):
+class YTStatsCmd(YTCommand):
+ args = ('outputfn','bn','skip','pf')
+ name = "stats"
+ description = \
"""
Print stats and maximum density for one or more datasets
- ${cmd_option_list}
"""
- pf = _fix_pf(arg)
+
+ def __call__(self, args):
+ pf = args.pf
pf.h.print_stats()
if "Density" in pf.h.field_list:
v, c = pf.h.find_max("Density")
print "Maximum density: %0.5e at %s" % (v, c)
- if opts.output is not None:
+ if args.output is not None:
t = pf.current_time * pf['years']
- open(opts.output, "a").write(
+ open(args.output, "a").write(
"%s (%0.5e years): %0.5e at %s\n" % (pf, t, v, c))
- @add_cmd_options([])
- def _do_analyze(self, subcmd, opts, arg):
+class YTAnalyzeCmd(YTCommand):
+
+ name = "analyze"
+ args = ('pf',)
+ description = \
"""
Produce a set of analysis for a given output. This includes
HaloProfiler results with r200, as per the recipe file in the cookbook,
profiles of a number of fields, projections of average Density and
Temperature, and distribution functions for Density and Temperature.
- ${cmd_option_list}
"""
+
+ def __call__(self, args):
# We will do the following things:
# Halo profiling (default parameters ONLY)
# Projections: Density, Temperature
@@ -697,51 +643,48 @@
ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
pc.save()
- @cmdln.option("-d", "--desc", action="store",
- default = None, dest="desc",
- help="Description for this pasteboard entry")
- def do_pasteboard(self, subcmd, opts, arg):
- """
- Place a file into your pasteboard.
- """
- if opts.desc is None: raise RuntimeError
- from yt.utilities.pasteboard import PostInventory
- pp = PostInventory()
- pp.add_post(arg, desc=opts.desc)
-
- @cmdln.option("-l", "--language", action="store",
+class YTPastebinCmd(YTCommand):
+ name = "pastebin"
+ args = (
+ dict(short="-l", long="--language", action="store",
default = None, dest="language",
- help="Use syntax highlighter for the file in language")
- @cmdln.option("-L", "--languages", action="store_true",
+ help="Use syntax highlighter for the file in language"),
+ dict(short="-L", long="--languages", action="store_true",
default = False, dest="languages",
- help="Retrive a list of supported languages")
- @cmdln.option("-e", "--encoding", action="store",
+ help="Retrive a list of supported languages"),
+ dict(short="-e", long="--encoding", action="store",
default = 'utf-8', dest="encoding",
help="Specify the encoding of a file (default is "
- "utf-8 or guessing if available)")
- @cmdln.option("-b", "--open-browser", action="store_true",
+ "utf-8 or guessing if available)"),
+ dict(short="-b", long="--open-browser", action="store_true",
default = False, dest="open_browser",
- help="Open the paste in a web browser")
- @cmdln.option("-p", "--private", action="store_true",
+ help="Open the paste in a web browser"),
+ dict(short="-p", long="--private", action="store_true",
default = False, dest="private",
- help="Paste as private")
- @cmdln.option("-c", "--clipboard", action="store_true",
+ help="Paste as private"),
+ dict(short="-c", long="--clipboard", action="store_true",
default = False, dest="clipboard",
- help="File to output to; else, print.")
- def do_pastebin(self, subcmd, opts, arg):
+ help="File to output to; else, print."),
+ dict(short="file", type=str),
+ )
+ description = \
"""
Post a script to an anonymous pastebin
Usage: yt pastebin [options] <script>
- ${cmd_option_list}
"""
+
+ def __call__(self, args):
import yt.utilities.lodgeit as lo
- lo.main( arg, languages=opts.languages, language=opts.language,
- encoding=opts.encoding, open_browser=opts.open_browser,
- private=opts.private, clipboard=opts.clipboard)
+ lo.main(args.file, languages=args.languages, language=args.language,
+ encoding=args.encoding, open_browser=args.open_browser,
+ private=args.private, clipboard=args.clipboard)
- def do_pastebin_grab(self, subcmd, opts, arg):
+class YTPastebinGrabCmd(YTCommand):
+ args = (dict(short="number", type=str),)
+ name = "pastebin_grab"
+ description = \
"""
Print an online pastebin to STDOUT for local use. Paste ID is
the number at the end of the url. So to locally access pastebin:
@@ -751,29 +694,21 @@
Ex: yt pastebin_grab 1688 > script.py
"""
+
+ def __call__(self, args):
import yt.utilities.lodgeit as lo
- lo.main( None, download=arg )
+ lo.main( None, download=args.number )
- @cmdln.option("-o", "--output", action="store",
- default = None, dest="output_fn",
- help="File to output to; else, print.")
- def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
- """
- Download from your or another user's pasteboard
- ${cmd_usage}
- ${cmd_option_list}
- """
- from yt.utilities.pasteboard import retrieve_pastefile
- retrieve_pastefile(username, paste_id, opts.output_fn)
-
- def do_bugreport(self, subcmd, opts):
+class YTBugreportCmd(YTCommand):
+ name = "bureport"
+ description = \
"""
Report a bug in yt
- ${cmd_usage}
- ${cmd_option_list}
"""
+
+ def __call__(self, args):
print "==============================================================="
print
print "Hi there! Welcome to the yt bugreport taker."
@@ -805,7 +740,7 @@
print "projections')"
print
try:
- current_version = get_yt_version()
+ current_version = _get_yt_version()
except:
current_version = "Unavailable"
summary = raw_input("Summary? ")
@@ -866,7 +801,7 @@
print "If you don't have one, run the 'yt bootstrap_dev' command."
print
loki = raw_input()
- retval = bb_apicall(endpoint, data, use_pass=True)
+ retval = _bb_apicall(endpoint, data, use_pass=True)
import json
retval = json.loads(retval)
url = "http://hg.yt-project.org/yt/issue/%s" % retval['local_id']
@@ -880,13 +815,13 @@
print "Keep in touch!"
print
- def do_bootstrap_dev(self, subcmd, opts):
+class YTBootstrapDevCmd(YTCommand):
+ name = "bootstrap_dev"
+ description = \
"""
Bootstrap a yt development environment
-
- ${cmd_usage}
- ${cmd_option_list}
"""
+ def __call__(self, args):
from mercurial import hg, ui, commands
import imp
import getpass
@@ -896,7 +831,7 @@
print "Hi there! Welcome to the yt development bootstrap tool."
print
print "This should get you started with mercurial as well as a few"
- print "other handy things, like a pasteboard of your very own."
+ print "other handy things"
print
# We have to do a couple things.
# First, we check that YT_DEST is set.
@@ -918,7 +853,6 @@
print " 1. Setting up your ~/.hgrc to have a username."
print " 2. Setting up your bitbucket user account and the hgbb"
print " extension."
- print " 3. Setting up a new pasteboard repository."
print
firstname = lastname = email_address = bbusername = repo_list = None
# Now we try to import the cedit extension.
@@ -1091,89 +1025,6 @@
# We now reload the UI's config file so that it catches the [bb]
# section changes.
uu.readconfig(hgrc_path[0])
- # Now the only thing remaining to do is to set up the pasteboard
- # repository.
- # This is, unfortunately, the most difficult.
- print
- print "We are now going to set up a pasteboard. This is a mechanism"
- print "for versioned posting of snippets, collaboration and"
- print "discussion."
- print
- # Let's get the full list of repositories
- pasteboard_name = "%s.bitbucket.org" % (bbusername.lower())
- if repo_list is None:
- rv = hgbb._bb_apicall(uu, "users/%s" % bbusername, None, False)
- rv = json.loads(rv)
- repo_list = rv['repositories']
- create = True
- for repo in repo_list:
- if repo['name'] == pasteboard_name:
- create = False
- if create:
- # Now we first create the repository, but we
- # will only use the creation API, not the bbcreate command.
- print
- print "I am now going to create the repository:"
- print " ", pasteboard_name
- print "on BitBucket.org. This will set up the domain"
- print " http://%s" % (pasteboard_name)
- print "which will point to the current contents of the repo."
- print
- loki = raw_input("Press enter to go on, Ctrl-C to exit.")
- data = dict(name=pasteboard_name)
- hgbb._bb_apicall(uu, 'repositories', data)
- # Now we clone
- pasteboard_path = os.path.join(os.environ["YT_DEST"], "src",
- pasteboard_name)
- if os.path.isdir(pasteboard_path):
- print "Found an existing clone of the pasteboard repo:"
- print " ", pasteboard_path
- else:
- print
- print "I will now clone a copy of your pasteboard repo."
- print
- loki = raw_input("Press enter to go on, Ctrl-C to exit.")
- commands.clone(uu, "https://%s@bitbucket.org/%s/%s" % (
- bbusername, bbusername, pasteboard_name),
- pasteboard_path)
- pbtemplate_path = os.path.join(supp_path, "pasteboard_template")
- pb_hgrc_path = os.path.join(pasteboard_path, ".hg", "hgrc")
- cedit.config.setoption(uu, [pb_hgrc_path],
- "paths.pasteboard = " + pbtemplate_path)
- if create:
- # We have to pull in the changesets from the pasteboard.
- pb_repo = hg.repository(uu, pasteboard_path)
- commands.pull(uu, pb_repo,
- os.path.join(supp_path, "pasteboard_template"))
- if ytcfg.get("yt","pasteboard_repo") != pasteboard_path:
- print
- print "Now setting the pasteboard_repo option in"
- print "~/.yt/config to point to %s" % (pasteboard_path)
- print
- loki = raw_input("Press enter to go on, Ctrl-C to exit.")
- dotyt_path = os.path.expanduser("~/.yt")
- if not os.path.isdir(dotyt_path):
- print "There's no directory:"
- print " ", dotyt_path
- print "I will now create it."
- print
- loki = raw_input("Press enter to go on, Ctrl-C to exit.")
- os.mkdir(dotyt_path)
- ytcfg_path = os.path.expanduser("~/.yt/config")
- cedit.config.setoption(uu, [ytcfg_path],
- "yt.pasteboard_repo=%s" % (pasteboard_path))
- try:
- import pygments
- install_pygments = False
- except ImportError:
- install_pygments = True
- if install_pygments:
- print "You are missing the Pygments package. Installing."
- import pip
- rv = pip.main(["install", "pygments"])
- if rv == 1:
- print "Unable to install Pygments. Please report this bug to yt-users."
- sys.exit(1)
try:
import lxml
install_lxml = False
@@ -1189,27 +1040,32 @@
print
print "All done!"
print
- print "You're now set up to use the 'yt pasteboard' command"
- print "as well as develop using Mercurial and BitBucket."
+ print "You're now set up to develop using Mercurial and BitBucket."
print
print "Good luck!"
- @cmdln.option("-o", "--open-browser", action="store_true",
- default = False, dest='open_browser',
- help="Open a web browser.")
- @cmdln.option("-p", "--port", action="store",
- default = 0, dest='port',
- help="Port to listen on")
- @cmdln.option("-f", "--find", action="store_true",
- default = False, dest="find",
- help="At startup, find all *.hierarchy files in the CWD")
- @cmdln.option("-d", "--debug", action="store_true",
- default = False, dest="debug",
- help="Add a debugging mode for cell execution")
- def do_serve(self, subcmd, opts):
+class YTServeCmd(YTCommand):
+ name = "serve"
+ args = (
+ dict(short="-o", long="--open-browser", action="store_true",
+ default = False, dest='open_browser',
+ help="Open a web browser."),
+ dict(short="-p", long="--port", action="store",
+ default = 0, dest='port',
+ help="Port to listen on"),
+ dict(short="-f", long="--find", action="store_true",
+ default = False, dest="find",
+ help="At startup, find all *.hierarchy files in the CWD"),
+ dict(short="-d", long="--debug", action="store_true",
+ default = False, dest="debug",
+ help="Add a debugging mode for cell execution")
+ )
+ description = \
"""
Run the Web GUI Reason
"""
+
+ def __call__(self, args):
# We have to do a couple things.
# First, we check that YT_DEST is set.
if "YT_DEST" not in os.environ:
@@ -1218,18 +1074,18 @@
print "*** to point to the installation location! ***"
print
sys.exit(1)
- if opts.port == 0:
+ if args.port == 0:
# This means, choose one at random. We do this by binding to a
# socket and allowing the OS to choose the port for that socket.
import socket
sock = socket.socket()
sock.bind(('', 0))
- opts.port = sock.getsockname()[-1]
+ args.port = sock.getsockname()[-1]
del sock
- elif opts.port == '-1':
+ elif args.port == '-1':
port = raw_input("Desired yt port? ")
try:
- opts.port = int(port)
+ args.port = int(port)
except ValueError:
print "Please try a number next time."
return 1
@@ -1247,78 +1103,32 @@
from yt.gui.reason.extdirect_repl import ExtDirectREPL
from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
hr = ExtDirectREPL(base_extjs_path)
- hr.debug = PayloadHandler.debug = opts.debug
- if opts.find:
+ hr.debug = PayloadHandler.debug = args.debug
+ if args.find:
# We just have to find them and store references to them.
command_line = ["pfs = []"]
for fn in sorted(glob.glob("*/*.hierarchy")):
command_line.append("pfs.append(load('%s'))" % fn[:-10])
hr.execute("\n".join(command_line))
bottle.debug()
- uuid_serve_functions(open_browser=opts.open_browser,
- port=int(opts.port), repl=hr)
+ uuid_serve_functions(open_browser=args.open_browser,
+ port=int(args.port), repl=hr)
- def _do_remote(self, subcmd, opts):
- import getpass, sys, socket, time, webbrowser
- import yt.utilities.pexpect as pex
-
- host = raw_input('Hostname: ')
- user = raw_input('User: ')
- password = getpass.getpass('Password: ')
-
- sock = socket.socket()
- sock.bind(('', 0))
- port = sock.getsockname()[-1]
- del sock
-
- child = pex.spawn('ssh -L %s:localhost:%s -l %s %s'%(port, port, user, host))
- ssh_newkey = 'Are you sure you want to continue connecting'
- i = child.expect([pex.TIMEOUT, ssh_newkey, 'password: '])
- if i == 0: # Timeout
- print 'ERROR!'
- print 'SSH could not login. Here is what SSH said:'
- print child.before, child.after
- return 1
- if i == 1: # SSH does not have the public key. Just accept it.
- child.sendline ('yes')
- child.expect ('password: ')
- i = child.expect([pex.TIMEOUT, 'password: '])
- if i == 0: # Timeout
- print 'ERROR!'
- print 'SSH could not login. Here is what SSH said:'
- print child.before, child.after
- return 1
- print "Sending password"
- child.sendline(password)
- del password
- print "Okay, sending serving command"
- child.sendline('yt serve -p -1')
- print "Waiting ..."
- child.expect('Desired yt port?')
- child.sendline("%s" % port)
- child.expect(' http://localhost:([0-9]*)/(.+)/\r')
- print "Got:", child.match.group(1), child.match.group(2)
- port, urlprefix = child.match.group(1), child.match.group(2)
- print "Sleeping one second and opening browser"
- time.sleep(1)
- webbrowser.open("http://localhost:%s/%s/" % (port, urlprefix))
- print "Press Ctrl-C to terminate session"
- child.readlines()
- while 1:
- time.sleep(1)
-
- @cmdln.option("-R", "--repo", action="store", type="string",
- dest="repo", default=".", help="Repository to upload")
- def do_hubsubmit(self, subcmd, opts):
+class YTHubSubmitCmd(YTCommand):
+ name = "hub_submit"
+ args = (
+ dict(long="--repo", action="store", type=str,
+ dest="repo", default=".", help="Repository to upload"),
+ )
+ description = \
"""
Submit a mercurial repository to the yt Hub
(http://hub.yt-project.org/), creating a BitBucket repo in the process
if necessary.
+ """
- ${cmd_usage}
- ${cmd_option_list}
- """
+ def __call__(self, args):
import imp
from mercurial import hg, ui, commands, error, config
uri = "http://hub.yt-project.org/3rdparty/API/api.php"
@@ -1341,10 +1151,10 @@
hgbb = imp.load_module("hgbb", *result)
uu = ui.ui()
try:
- repo = hg.repository(uu, opts.repo)
+ repo = hg.repository(uu, args.repo)
conf = config.config()
- if os.path.exists(os.path.join(opts.repo,".hg","hgrc")):
- conf.read(os.path.join(opts.repo, ".hg", "hgrc"))
+ if os.path.exists(os.path.join(args.repo,".hg","hgrc")):
+ conf.read(os.path.join(args.repo, ".hg", "hgrc"))
needs_bb = True
if "paths" in conf.sections():
default = conf['paths'].get("default", "")
@@ -1359,7 +1169,7 @@
break
except error.RepoError:
print "Unable to find repo at:"
- print " %s" % (os.path.abspath(opts.repo))
+ print " %s" % (os.path.abspath(args.repo))
print
print "Would you like to initialize one? If this message"
print "surprises you, you should perhaps press Ctrl-C to quit."
@@ -1370,8 +1180,8 @@
print "Okay, rad -- we'll let you handle it and get back to",
print " us."
return 1
- commands.init(uu, dest=opts.repo)
- repo = hg.repository(uu, opts.repo)
+ commands.init(uu, dest=args.repo)
+ repo = hg.repository(uu, args.repo)
commands.add(uu, repo)
commands.commit(uu, repo, message="Initial automated import by yt")
needs_bb = True
@@ -1396,7 +1206,7 @@
print
print "to get set up and ready to go."
return 1
- bb_repo_name = os.path.basename(os.path.abspath(opts.repo))
+ bb_repo_name = os.path.basename(os.path.abspath(args.repo))
print
print "I am now going to create the repository:"
print " ", bb_repo_name
@@ -1479,13 +1289,16 @@
rv = urllib2.urlopen(req).read()
print rv
- def do_upload_image(self, subcmd, opts, filename):
+class YTUploadImageCmd(YTCommand):
+ args = (dict(short="file", type=str))
+ description = \
"""
Upload an image to imgur.com. Must be PNG.
- ${cmd_usage}
- ${cmd_option_list}
"""
+
+ def __call__(self, args):
+ filename = args.file
if not filename.endswith(".png"):
print "File must be a PNG file!"
return 1
@@ -1517,56 +1330,57 @@
print
pprint.pprint(rv)
- @add_cmd_options(["width", "unit", "center","enhance",'outputfn',
- "field", "cmap", "contours", "viewpoint",
- "pixels","up","valrange","log","contour_width"])
- @check_args
- def do_render(self, subcmd, opts, arg):
+class YTRenderCmd(YTCommand):
+
+ args = ("width", "unit", "center","enhance",'outputfn',
+ "field", "cmap", "contours", "viewpoint",
+ "pixels","up","valrange","log","contour_width", "pf")
+ name = "render"
+ description = \
"""
Create a simple volume rendering
+ """
- ${cmd_usage}
- ${cmd_option_list}
- """
- pf = _fix_pf(arg)
- center = opts.center
- if opts.center == (-1,-1,-1):
+ def __call__(self, args):
+ pf = args.pf
+ center = args.center
+ if args.center == (-1,-1,-1):
mylog.info("No center fed in; seeking.")
v, center = pf.h.find_max("Density")
- elif opts.center is None:
+ elif args.center is None:
center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
center = na.array(center)
- L = opts.viewpoint
+ L = args.viewpoint
if L is None:
L = [1.]*3
- L = na.array(opts.viewpoint)
+ L = na.array(args.viewpoint)
- unit = opts.unit
+ unit = args.unit
if unit is None:
unit = '1'
- width = opts.width
+ width = args.width
if width is None:
width = 0.5*(pf.domain_right_edge - pf.domain_left_edge)
width /= pf[unit]
- N = opts.pixels
+ N = args.pixels
if N is None:
N = 512
- up = opts.up
+ up = args.up
if up is None:
up = [0.,0.,1.]
- field = opts.field
+ field = args.field
if field is None:
field = 'Density'
- log = opts.takelog
+ log = args.takelog
if log is None:
log = True
- myrange = opts.valrange
+ myrange = args.valrange
if myrange is None:
roi = pf.h.region(center, center-width, center+width)
mi, ma = roi.quantities['Extrema'](field)[0]
@@ -1575,13 +1389,13 @@
else:
mi, ma = myrange[0], myrange[1]
- n_contours = opts.contours
+ n_contours = args.contours
if n_contours is None:
n_contours = 7
- contour_width = opts.contour_width
+ contour_width = args.contour_width
- cmap = opts.cmap
+ cmap = args.cmap
if cmap is None:
cmap = 'jet'
tf = ColorTransferFunction((mi-2, ma+2))
@@ -1590,12 +1404,12 @@
cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
image = cam.snapshot()
- if opts.enhance:
+ if args.enhance:
for i in range(3):
image[:,:,i] = image[:,:,i]/(image[:,:,i].mean() + 5.*image[:,:,i].std())
image[image>1.0]=1.0
- save_name = opts.output
+ save_name = args.output
if save_name is None:
save_name = "%s"%pf+"_"+field+"_rendering.png"
if not '.png' in save_name:
@@ -1605,9 +1419,7 @@
def run_main():
- for co in ["--parallel", "--paste"]:
- if co in sys.argv: del sys.argv[sys.argv.index(co)]
- YT = YTCommands()
- sys.exit(YT.main())
+ args = parser.parse_args()
+ args.func(args)
if __name__ == "__main__": run_main()
diff -r 64ee72664bb20977421c497cfd18e8cc446159af -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a yt/utilities/pasteboard.py
--- a/yt/utilities/pasteboard.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
- Copyright (C) 2011 Matthew Turk. All Rights Reserved.
-
- This file is part of yt.
-
- yt is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from mercurial import ui, repo, commands, hg
-import json
-import os
-import time
-import uuid
-import urllib
-
-from yt.config import ytcfg
-
-def _get_last_mod(filectx):
- rev = filectx.filectx(filectx.filerev())
- return rev
-
-class PostInventory(object):
- def __init__(self, uu = None, repo_fn = None):
- if uu is None: uu = ui.ui()
- if repo_fn is None: repo_fn = ytcfg.get("yt","pasteboard_repo")
- if repo_fn == '':
- raise KeyError("~/.yt/config:[yt]pasteboard_repo")
- self.repo_fn = repo_fn
- self.bbrepo = hg.repository(uu, repo_fn)
- config_fn = os.path.join(repo_fn, ".hg", "hgrc")
- uu.readconfig(config_fn)
- commands.pull(uu, self.bbrepo)
- commands.update(uu, self.bbrepo, clean=True)
- if not os.path.exists(os.path.join(repo_fn, "posts")):
- os.makedirs(os.path.join(repo_fn, "posts"))
- if not os.path.exists(os.path.join(repo_fn, "html")):
- os.makedirs(os.path.join(repo_fn, "html"))
- self.uu = uu
-
- def regenerate_posts(self):
- self.posts = []
- for file in self.bbrepo["tip"]:
- if file.startswith("posts/") and file.count("/") == 1 \
- and not file.endswith(".desc"):
- filectx = self.bbrepo["tip"][file]
- last_mod = _get_last_mod(filectx).date()
- self.posts.append((last_mod[0] + last_mod[1], file))
- self.posts.sort()
- self.posts = self.posts[::-1]
-
- def add_post(self, filename, desc = None,
- uu = None, highlight = True, push = True):
- # We assume the post filename exists in the current space
- self.regenerate_posts()
- if uu is None: uu = self.uu
- prefix = uuid.uuid4()
- name = "%s-%s" % (prefix, os.path.basename(filename))
- name_noext = name.replace(".","-")
- hfn = "html/%s.html" % (name_noext)
- pfn = "posts/%s" % (name)
- abs_pfn = os.path.join(self.repo_fn, pfn)
- abs_hfn = os.path.join(self.repo_fn, hfn)
- if desc is not None:
- open(abs_pfn + ".desc", "w").write(desc)
- self.posts.insert(0, (int(time.time()), "posts/%s" % name))
- if not os.path.exists(abs_pfn):
- open(abs_pfn,"w").write(open(filename).read())
- inv_fname = self.update_inventory()
- if highlight and not name.endswith(".html"):
- from pygments.cmdline import main as pygmain
- rv = pygmain(["pygmentize", "-o", abs_hfn,
- "-O", "full", abs_pfn])
- if not highlight or rv:
- content = open(abs_pfn).read()
- open(abs_hfn, "w").write(
- "<HTML><BODY><PRE>" + content + "</PRE></BODY></HTML>")
- to_manage = [abs_pfn, abs_hfn]
- if desc is not None: to_manage.append(abs_pfn + ".desc")
- commands.add(uu, self.bbrepo, *to_manage)
- commands.commit(uu, self.bbrepo, *(to_manage + [inv_fname]),
- message="Adding %s" % name)
- if push: commands.push(uu, self.bbrepo)
-
- def update_inventory(self):
- tip = self.bbrepo["tip"]
- vals = []
- for t, pfn in self.posts:
- dfn = pfn + ".desc"
- if dfn in tip:
- d = tip[dfn].data()
- last_mod =_get_last_mod(tip[dfn])
- last_hash = last_mod.hex()
- uname = last_mod.user()
- elif pfn not in tip:
- abs_pfn = os.path.join(self.repo_fn, pfn)
- uname = self.uu.config("ui","username")
- if os.path.exists(abs_pfn + ".desc"):
- d = open(abs_pfn + ".desc").read()
- else:
- d = open(abs_pfn).read()
- last_hash = "tip"
- else:
- d = tip[pfn].data()
- last_mod = _get_last_mod(tip[pfn])
- last_hash = last_mod.hex()
- uname = last_mod.user()
- if len(d) > 80: d = d[:77] + "..."
- name_noext = pfn[6:].replace(".","-")
- vals.append(dict(modified = time.ctime(t),
- modtime = t,
- lastmod_hash = last_hash,
- fullname = pfn,
- htmlname = "html/%s.html" % name_noext,
- name = pfn[43:], # 6 for posts/ then 36 for UUID
- username = uname,
- descr = d))
- fn = os.path.join(self.repo_fn, "inventory.json")
- f = open(fn, "w")
- f.write("var inventory_data = ")
- json.dump(vals, f, indent = 1)
- f.write(";")
- return fn
-
-def retrieve_pastefile(username, paste_id, output_fn = None):
- # First we get the username's inventory.json
- s = urllib.urlopen("http://%s.bitbucket.org/inventory.json" % (username))
- data = s.read()
- # This is an ugly, ugly hack for my lack of understanding of how best to
- # handle this JSON stuff.
- data = data[data.find("=")+1:data.rfind(";")]
- #import pdb;pdb.set_trace()
- inv = json.loads(data)
- k = None
- if len(paste_id) == 36:
- # Then this is a UUID
- for k in inv:
- if k['fullname'][6:42] == paste_id: break
- elif len(paste_id) == 10:
- pp = int(paste_id)
- for k in inv:
- if k['modtime'] == pp: break
- if k is None: raise KeyError(k)
- # k is our key
- url = "http://%s.bitbucket.org/%s" % (username, k['fullname'])
- s = urllib.urlopen(url)
- data = s.read()
- if output_fn is not None:
- if os.path.exists(output_fn): raise IOError(output_fn)
- open(output_fn, "w").write(data)
- else:
- print data
diff -r 64ee72664bb20977421c497cfd18e8cc446159af -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a yt/utilities/pexpect.py
--- a/yt/utilities/pexpect.py
+++ /dev/null
@@ -1,1845 +0,0 @@
-"""Pexpect is a Python module for spawning child applications and controlling
-them automatically. Pexpect can be used for automating interactive applications
-such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup
-scripts for duplicating software package installations on different servers. It
-can be used for automated software testing. Pexpect is in the spirit of Don
-Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
-require TCL and Expect or require C extensions to be compiled. Pexpect does not
-use C, Expect, or TCL extensions. It should work on any platform that supports
-the standard Python pty module. The Pexpect interface focuses on ease of use so
-that simple tasks are easy.
-
-There are two main interfaces to Pexpect -- the function, run() and the class,
-spawn. You can call the run() function to execute a command and return the
-output. This is a handy replacement for os.system().
-
-For example::
-
- pexpect.run('ls -la')
-
-The more powerful interface is the spawn class. You can use this to spawn an
-external child command and then interact with the child by sending lines and
-expecting responses.
-
-For example::
-
- child = pexpect.spawn('scp foo myname at host.example.com:.')
- child.expect ('Password:')
- child.sendline (mypassword)
-
-This works even for commands that ask for passwords or other input outside of
-the normal stdio streams.
-
-Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
-Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
-vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
-Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
-Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
-Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
-
-Free, open source, and all that good stuff.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-Pexpect Copyright (c) 2008 Noah Spurrier
-http://pexpect.sourceforge.net/
-
-$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
-"""
-
-try:
- import os, sys, time
- import select
- import string
- import re
- import struct
- import resource
- import types
- import pty
- import tty
- import termios
- import fcntl
- import errno
- import traceback
- import signal
-except ImportError, e:
- raise ImportError (str(e) + """
-
-A critical module was not found. Probably this operating system does not
-support it. Pexpect is intended for UNIX-like operating systems.""")
-
-__version__ = '2.3'
-__revision__ = '$Revision: 399 $'
-__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
- 'split_command_line', '__version__', '__revision__']
-
-# Exception classes used by this module.
-class ExceptionPexpect(Exception):
-
- """Base class for all exceptions raised by this module.
- """
-
- def __init__(self, value):
-
- self.value = value
-
- def __str__(self):
-
- return str(self.value)
-
- def get_trace(self):
-
- """This returns an abbreviated stack trace with lines that only concern
- the caller. In other words, the stack trace inside the Pexpect module
- is not included. """
-
- tblist = traceback.extract_tb(sys.exc_info()[2])
- #tblist = filter(self.__filter_not_pexpect, tblist)
- tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
- tblist = traceback.format_list(tblist)
- return ''.join(tblist)
-
- def __filter_not_pexpect(self, trace_list_item):
-
- """This returns True if list item 0 the string 'pexpect.py' in it. """
-
- if trace_list_item[0].find('pexpect.py') == -1:
- return True
- else:
- return False
-
-class EOF(ExceptionPexpect):
-
- """Raised when EOF is read from a child. This usually means the child has exited."""
-
-class TIMEOUT(ExceptionPexpect):
-
- """Raised when a read time exceeds the timeout. """
-
-##class TIMEOUT_PATTERN(TIMEOUT):
-## """Raised when the pattern match time exceeds the timeout.
-## This is different than a read TIMEOUT because the child process may
-## give output, thus never give a TIMEOUT, but the output
-## may never match a pattern.
-## """
-##class MAXBUFFER(ExceptionPexpect):
-## """Raised when a scan buffer fills before matching an expected pattern."""
-
-def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
-
- """
- This function runs the given command; waits for it to finish; then
- returns all output as a string. STDERR is included in output. If the full
- path to the command is not given then the path is searched.
-
- Note that lines are terminated by CR/LF (\\r\\n) combination even on
- UNIX-like systems because this is the standard for pseudo ttys. If you set
- 'withexitstatus' to true, then run will return a tuple of (command_output,
- exitstatus). If 'withexitstatus' is false then this returns just
- command_output.
-
- The run() function can often be used instead of creating a spawn instance.
- For example, the following code uses spawn::
-
- from pexpect import *
- child = spawn('scp foo myname at host.example.com:.')
- child.expect ('(?i)password')
- child.sendline (mypassword)
-
- The previous code can be replace with the following::
-
- from pexpect import *
- run ('scp foo myname at host.example.com:.', events={'(?i)password': mypassword})
-
- Examples
- ========
-
- Start the apache daemon on the local machine::
-
- from pexpect import *
- run ("/usr/local/apache/bin/apachectl start")
-
- Check in a file using SVN::
-
- from pexpect import *
- run ("svn ci -m 'automatic commit' my_file.py")
-
- Run a command and capture exit status::
-
- from pexpect import *
- (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
-
- Tricky Examples
- ===============
-
- The following will run SSH and execute 'ls -l' on the remote machine. The
- password 'secret' will be sent if the '(?i)password' pattern is ever seen::
-
- run ("ssh username at machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
-
- This will start mencoder to rip a video from DVD. This will also display
- progress ticks every 5 seconds as it runs. For example::
-
- from pexpect import *
- def print_ticks(d):
- print d['event_count'],
- run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
-
- The 'events' argument should be a dictionary of patterns and responses.
- Whenever one of the patterns is seen in the command out run() will send the
- associated response string. Note that you should put newlines in your
- string if Enter is necessary. The responses may also contain callback
- functions. Any callback is function that takes a dictionary as an argument.
- The dictionary contains all the locals from the run() function, so you can
- access the child spawn object or any other variable defined in run()
- (event_count, child, and extra_args are the most useful). A callback may
- return True to stop the current run process otherwise run() continues until
- the next event. A callback may also return a string which will be sent to
- the child. 'extra_args' is not used by directly run(). It provides a way to
- pass data to a callback function through run() through the locals
- dictionary passed to a callback. """
-
- if timeout == -1:
- child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
- else:
- child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
- if events is not None:
- patterns = events.keys()
- responses = events.values()
- else:
- patterns=None # We assume that EOF or TIMEOUT will save us.
- responses=None
- child_result_list = []
- event_count = 0
- while 1:
- try:
- index = child.expect (patterns)
- if type(child.after) in types.StringTypes:
- child_result_list.append(child.before + child.after)
- else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
- child_result_list.append(child.before)
- if type(responses[index]) in types.StringTypes:
- child.send(responses[index])
- elif type(responses[index]) is types.FunctionType:
- callback_result = responses[index](locals())
- sys.stdout.flush()
- if type(callback_result) in types.StringTypes:
- child.send(callback_result)
- elif callback_result:
- break
- else:
- raise TypeError ('The callback must be a string or function type.')
- event_count = event_count + 1
- except TIMEOUT, e:
- child_result_list.append(child.before)
- break
- except EOF, e:
- child_result_list.append(child.before)
- break
- child_result = ''.join(child_result_list)
- if withexitstatus:
- child.close()
- return (child_result, child.exitstatus)
- else:
- return child_result
-
-class spawn (object):
-
- """This is the main class interface for Pexpect. Use this class to start
- and control child applications. """
-
- def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
-
- """This is the constructor. The command parameter may be a string that
- includes a command and any arguments to the command. For example::
-
- child = pexpect.spawn ('/usr/bin/ftp')
- child = pexpect.spawn ('/usr/bin/ssh user at example.com')
- child = pexpect.spawn ('ls -latr /tmp')
-
- You may also construct it with a list of arguments like so::
-
- child = pexpect.spawn ('/usr/bin/ftp', [])
- child = pexpect.spawn ('/usr/bin/ssh', ['user at example.com'])
- child = pexpect.spawn ('ls', ['-latr', '/tmp'])
-
- After this the child application will be created and will be ready to
- talk to. For normal use, see expect() and send() and sendline().
-
- Remember that Pexpect does NOT interpret shell meta characters such as
- redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
- If you want to run a command and pipe it through another command then
- you must also start a shell. For example::
-
- child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
- child.expect(pexpect.EOF)
-
- The second form of spawn (where you pass a list of arguments) is useful
- in situations where you wish to spawn a command and pass it its own
- argument list. This can make syntax more clear. For example, the
- following is equivalent to the previous example::
-
- shell_cmd = 'ls -l | grep LOG > log_list.txt'
- child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
- child.expect(pexpect.EOF)
-
- The maxread attribute sets the read buffer size. This is maximum number
- of bytes that Pexpect will try to read from a TTY at one time. Setting
- the maxread size to 1 will turn off buffering. Setting the maxread
- value higher may help performance in cases where large amounts of
- output are read back from the child. This feature is useful in
- conjunction with searchwindowsize.
-
- The searchwindowsize attribute sets the how far back in the incomming
- seach buffer Pexpect will search for pattern matches. Every time
- Pexpect reads some data from the child it will append the data to the
- incomming buffer. The default is to search from the beginning of the
- imcomming buffer each time new data is read from the child. But this is
- very inefficient if you are running a command that generates a large
- amount of data where you want to match The searchwindowsize does not
- effect the size of the incomming data buffer. You will still have
- access to the full buffer after expect() returns.
-
- The logfile member turns on or off logging. All input and output will
- be copied to the given file object. Set logfile to None to stop
- logging. This is the default. Set logfile to sys.stdout to echo
- everything to standard output. The logfile is flushed after each write.
-
- Example log input and output to a file::
-
- child = pexpect.spawn('some_command')
- fout = file('mylog.txt','w')
- child.logfile = fout
-
- Example log to stdout::
-
- child = pexpect.spawn('some_command')
- child.logfile = sys.stdout
-
- The logfile_read and logfile_send members can be used to separately log
- the input from the child and output sent to the child. Sometimes you
- don't want to see everything you write to the child. You only want to
- log what the child sends back. For example::
-
- child = pexpect.spawn('some_command')
- child.logfile_read = sys.stdout
-
- To separately log output sent to the child use logfile_send::
-
- self.logfile_send = fout
-
- The delaybeforesend helps overcome a weird behavior that many users
- were experiencing. The typical problem was that a user would expect() a
- "Password:" prompt and then immediately call sendline() to send the
- password. The user would then see that their password was echoed back
- to them. Passwords don't normally echo. The problem is caused by the
- fact that most applications print out the "Password" prompt and then
- turn off stdin echo, but if you send your password before the
- application turned off echo, then you get your password echoed.
- Normally this wouldn't be a problem when interacting with a human at a
- real keyboard. If you introduce a slight delay just before writing then
- this seems to clear up the problem. This was such a common problem for
- many users that I decided that the default pexpect behavior should be
- to sleep just before writing to the child application. 1/20th of a
- second (50 ms) seems to be enough to clear up the problem. You can set
- delaybeforesend to 0 to return to the old behavior. Most Linux machines
- don't like this to be below 0.03. I don't know why.
-
- Note that spawn is clever about finding commands on your path.
- It uses the same logic that "which" uses to find executables.
-
- If you wish to get the exit status of the child you must call the
- close() method. The exit or signal status of the child will be stored
- in self.exitstatus or self.signalstatus. If the child exited normally
- then exitstatus will store the exit return code and signalstatus will
- be None. If the child was terminated abnormally with a signal then
- signalstatus will store the signal value and exitstatus will be None.
- If you need more detail you can also read the self.status member which
- stores the status returned by os.waitpid. You can interpret this using
- os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG. """
-
- self.STDIN_FILENO = pty.STDIN_FILENO
- self.STDOUT_FILENO = pty.STDOUT_FILENO
- self.STDERR_FILENO = pty.STDERR_FILENO
- self.stdin = sys.stdin
- self.stdout = sys.stdout
- self.stderr = sys.stderr
-
- self.searcher = None
- self.ignorecase = False
- self.before = None
- self.after = None
- self.match = None
- self.match_index = None
- self.terminated = True
- self.exitstatus = None
- self.signalstatus = None
- self.status = None # status returned by os.waitpid
- self.flag_eof = False
- self.pid = None
- self.child_fd = -1 # initially closed
- self.timeout = timeout
- self.delimiter = EOF
- self.logfile = logfile
- self.logfile_read = None # input from child (read_nonblocking)
- self.logfile_send = None # output to send (send, sendline)
- self.maxread = maxread # max bytes to read at one time into buffer
- self.buffer = '' # This is the read buffer. See maxread.
- self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
- # Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
- self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
- self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
- self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
- self.softspace = False # File-like object.
- self.name = '<' + repr(self) + '>' # File-like object.
- self.encoding = None # File-like object.
- self.closed = True # File-like object.
- self.cwd = cwd
- self.env = env
- self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
- # Solaris uses internal __fork_pty(). All others use pty.fork().
- if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
- self.use_native_pty_fork = False
- else:
- self.use_native_pty_fork = True
-
-
- # allow dummy instances for subclasses that may not use command or args.
- if command is None:
- self.command = None
- self.args = None
- self.name = '<pexpect factory incomplete>'
- else:
- self._spawn (command, args)
-
- def __del__(self):
-
- """This makes sure that no system resources are left open. Python only
- garbage collects Python objects. OS file descriptors are not Python
- objects, so they must be handled explicitly. If the child file
- descriptor was opened outside of this class (passed to the constructor)
- then this does not close it. """
-
- if not self.closed:
- # It is possible for __del__ methods to execute during the
- # teardown of the Python VM itself. Thus self.close() may
- # trigger an exception because os.close may be None.
- # -- Fernando Perez
- try:
- self.close()
- except AttributeError:
- pass
-
- def __str__(self):
-
- """This returns a human-readable string that represents the state of
- the object. """
-
- s = []
- s.append(repr(self))
- s.append('version: ' + __version__ + ' (' + __revision__ + ')')
- s.append('command: ' + str(self.command))
- s.append('args: ' + str(self.args))
- s.append('searcher: ' + str(self.searcher))
- s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
- s.append('before (last 100 chars): ' + str(self.before)[-100:])
- s.append('after: ' + str(self.after))
- s.append('match: ' + str(self.match))
- s.append('match_index: ' + str(self.match_index))
- s.append('exitstatus: ' + str(self.exitstatus))
- s.append('flag_eof: ' + str(self.flag_eof))
- s.append('pid: ' + str(self.pid))
- s.append('child_fd: ' + str(self.child_fd))
- s.append('closed: ' + str(self.closed))
- s.append('timeout: ' + str(self.timeout))
- s.append('delimiter: ' + str(self.delimiter))
- s.append('logfile: ' + str(self.logfile))
- s.append('logfile_read: ' + str(self.logfile_read))
- s.append('logfile_send: ' + str(self.logfile_send))
- s.append('maxread: ' + str(self.maxread))
- s.append('ignorecase: ' + str(self.ignorecase))
- s.append('searchwindowsize: ' + str(self.searchwindowsize))
- s.append('delaybeforesend: ' + str(self.delaybeforesend))
- s.append('delayafterclose: ' + str(self.delayafterclose))
- s.append('delayafterterminate: ' + str(self.delayafterterminate))
- return '\n'.join(s)
-
- def _spawn(self,command,args=[]):
-
- """This starts the given command in a child process. This does all the
- fork/exec type of stuff for a pty. This is called by __init__. If args
- is empty then command will be parsed (split on spaces) and args will be
- set to parsed arguments. """
-
- # The pid and child_fd of this object get set by this method.
- # Note that it is difficult for this method to fail.
- # You cannot detect if the child process cannot start.
- # So the only way you can tell if the child process started
- # or not is to try to read from the file descriptor. If you get
- # EOF immediately then it means that the child is already dead.
- # That may not necessarily be bad because you may haved spawned a child
- # that performs some task; creates no stdout output; and then dies.
-
- # If command is an int type then it may represent a file descriptor.
- if type(command) == type(0):
- raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
-
- if type (args) != type([]):
- raise TypeError ('The argument, args, must be a list.')
-
- if args == []:
- self.args = split_command_line(command)
- self.command = self.args[0]
- else:
- self.args = args[:] # work with a copy
- self.args.insert (0, command)
- self.command = command
-
- command_with_path = which(self.command)
- if command_with_path is None:
- raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
- self.command = command_with_path
- self.args[0] = self.command
-
- self.name = '<' + ' '.join (self.args) + '>'
-
- assert self.pid is None, 'The pid member should be None.'
- assert self.command is not None, 'The command member should not be None.'
-
- if self.use_native_pty_fork:
- try:
- self.pid, self.child_fd = pty.fork()
- except OSError, e:
- raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
- else: # Use internal __fork_pty
- self.pid, self.child_fd = self.__fork_pty()
-
- if self.pid == 0: # Child
- try:
- self.child_fd = sys.stdout.fileno() # used by setwinsize()
- self.setwinsize(24, 80)
- except:
- # Some platforms do not like setwinsize (Cygwin).
- # This will cause problem when running applications that
- # are very picky about window size.
- # This is a serious limitation, but not a show stopper.
- pass
- # Do not allow child to inherit open file descriptors from parent.
- max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
- for i in range (3, max_fd):
- try:
- os.close (i)
- except OSError:
- pass
-
- # I don't know why this works, but ignoring SIGHUP fixes a
- # problem when trying to start a Java daemon with sudo
- # (specifically, Tomcat).
- signal.signal(signal.SIGHUP, signal.SIG_IGN)
-
- if self.cwd is not None:
- os.chdir(self.cwd)
- if self.env is None:
- os.execv(self.command, self.args)
- else:
- os.execvpe(self.command, self.args, self.env)
-
- # Parent
- self.terminated = False
- self.closed = False
-
- def __fork_pty(self):
-
- """This implements a substitute for the forkpty system call. This
- should be more portable than the pty.fork() function. Specifically,
- this should work on Solaris.
-
- Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
- resolve the issue with Python's pty.fork() not supporting Solaris,
- particularly ssh. Based on patch to posixmodule.c authored by Noah
- Spurrier::
-
- http://mail.python.org/pipermail/python-dev/2003-May/035281.html
-
- """
-
- parent_fd, child_fd = os.openpty()
- if parent_fd < 0 or child_fd < 0:
- raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
-
- pid = os.fork()
- if pid < 0:
- raise ExceptionPexpect, "Error! Failed os.fork()."
- elif pid == 0:
- # Child.
- os.close(parent_fd)
- self.__pty_make_controlling_tty(child_fd)
-
- os.dup2(child_fd, 0)
- os.dup2(child_fd, 1)
- os.dup2(child_fd, 2)
-
- if child_fd > 2:
- os.close(child_fd)
- else:
- # Parent.
- os.close(child_fd)
-
- return pid, parent_fd
-
- def __pty_make_controlling_tty(self, tty_fd):
-
- """This makes the pseudo-terminal the controlling tty. This should be
- more portable than the pty.fork() function. Specifically, this should
- work on Solaris. """
-
- child_name = os.ttyname(tty_fd)
-
- # Disconnect from controlling tty if still connected.
- fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
- if fd >= 0:
- os.close(fd)
-
- os.setsid()
-
- # Verify we are disconnected from controlling tty
- try:
- fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
- if fd >= 0:
- os.close(fd)
- raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
- except:
- # Good! We are disconnected from a controlling tty.
- pass
-
- # Verify we can open child pty.
- fd = os.open(child_name, os.O_RDWR);
- if fd < 0:
- raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
- else:
- os.close(fd)
-
- # Verify we now have a controlling tty.
- fd = os.open("/dev/tty", os.O_WRONLY)
- if fd < 0:
- raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
- else:
- os.close(fd)
-
- def fileno (self): # File-like object.
-
- """This returns the file descriptor of the pty for the child.
- """
-
- return self.child_fd
-
- def close (self, force=True): # File-like object.
-
- """This closes the connection with the child application. Note that
- calling close() more than once is valid. This emulates standard Python
- behavior with files. Set force to True if you want to make sure that
- the child is terminated (SIGKILL is sent if the child ignores SIGHUP
- and SIGINT). """
-
- if not self.closed:
- self.flush()
- os.close (self.child_fd)
- time.sleep(self.delayafterclose) # Give kernel time to update process status.
- if self.isalive():
- if not self.terminate(force):
- raise ExceptionPexpect ('close() could not terminate the child using terminate()')
- self.child_fd = -1
- self.closed = True
- #self.pid = None
-
- def flush (self): # File-like object.
-
- """This does nothing. It is here to support the interface for a
- File-like object. """
-
- pass
-
- def isatty (self): # File-like object.
-
- """This returns True if the file descriptor is open and connected to a
- tty(-like) device, else False. """
-
- return os.isatty(self.child_fd)
-
- def waitnoecho (self, timeout=-1):
-
- """This waits until the terminal ECHO flag is set False. This returns
- True if the echo mode is off. This returns False if the ECHO flag was
- not set False before the timeout. This can be used to detect when the
- child is waiting for a password. Usually a child application will turn
- off echo mode when it is waiting for the user to enter a password. For
- example, instead of expecting the "password:" prompt you can wait for
- the child to set ECHO off::
-
- p = pexpect.spawn ('ssh user at example.com')
- p.waitnoecho()
- p.sendline(mypassword)
-
- If timeout is None then this method to block forever until ECHO flag is
- False.
-
- """
-
- if timeout == -1:
- timeout = self.timeout
- if timeout is not None:
- end_time = time.time() + timeout
- while True:
- if not self.getecho():
- return True
- if timeout < 0 and timeout is not None:
- return False
- if timeout is not None:
- timeout = end_time - time.time()
- time.sleep(0.1)
-
- def getecho (self):
-
- """This returns the terminal echo mode. This returns True if echo is
- on or False if echo is off. Child applications that are expecting you
- to enter a password often set ECHO False. See waitnoecho(). """
-
- attr = termios.tcgetattr(self.child_fd)
- if attr[3] & termios.ECHO:
- return True
- return False
-
- def setecho (self, state):
-
- """This sets the terminal echo mode on or off. Note that anything the
- child sent before the echo will be lost, so you should be sure that
- your input buffer is empty before you call setecho(). For example, the
- following will work as expected::
-
- p = pexpect.spawn('cat')
- p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
- p.expect (['1234'])
- p.expect (['1234'])
- p.setecho(False) # Turn off tty echo
- p.sendline ('abcd') # We will set this only once (echoed by cat).
- p.sendline ('wxyz') # We will set this only once (echoed by cat)
- p.expect (['abcd'])
- p.expect (['wxyz'])
-
- The following WILL NOT WORK because the lines sent before the setecho
- will be lost::
-
- p = pexpect.spawn('cat')
- p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
- p.setecho(False) # Turn off tty echo
- p.sendline ('abcd') # We will set this only once (echoed by cat).
- p.sendline ('wxyz') # We will set this only once (echoed by cat)
- p.expect (['1234'])
- p.expect (['1234'])
- p.expect (['abcd'])
- p.expect (['wxyz'])
- """
-
- self.child_fd
- attr = termios.tcgetattr(self.child_fd)
- if state:
- attr[3] = attr[3] | termios.ECHO
- else:
- attr[3] = attr[3] & ~termios.ECHO
- # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
- # and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
- termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
-
- def read_nonblocking (self, size = 1, timeout = -1):
-
- """This reads at most size characters from the child application. It
- includes a timeout. If the read does not complete within the timeout
- period then a TIMEOUT exception is raised. If the end of file is read
- then an EOF exception will be raised. If a log file was set using
- setlog() then all data will also be written to the log file.
-
- If timeout is None then the read may block indefinitely. If timeout is -1
- then the self.timeout value is used. If timeout is 0 then the child is
- polled and if there was no data immediately ready then this will raise
- a TIMEOUT exception.
-
- The timeout refers only to the amount of time to read at least one
- character. This is not effected by the 'size' parameter, so if you call
- read_nonblocking(size=100, timeout=30) and only one character is
- available right away then one character will be returned immediately.
- It will not wait for 30 seconds for another 99 characters to come in.
-
- This is a wrapper around os.read(). It uses select.select() to
- implement the timeout. """
-
- if self.closed:
- raise ValueError ('I/O operation on closed file in read_nonblocking().')
-
- if timeout == -1:
- timeout = self.timeout
-
- # Note that some systems such as Solaris do not give an EOF when
- # the child dies. In fact, you can still try to read
- # from the child_fd -- it will block forever or until TIMEOUT.
- # For this case, I test isalive() before doing any reading.
- # If isalive() is false, then I pretend that this is the same as EOF.
- if not self.isalive():
- r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
- if not r:
- self.flag_eof = True
- raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
- elif self.__irix_hack:
- # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
- # This adds a 2 second delay, but only when the child is terminated.
- r, w, e = self.__select([self.child_fd], [], [], 2)
- if not r and not self.isalive():
- self.flag_eof = True
- raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
-
- r,w,e = self.__select([self.child_fd], [], [], timeout)
-
- if not r:
- if not self.isalive():
- # Some platforms, such as Irix, will claim that their processes are alive;
- # then timeout on the select; and then finally admit that they are not alive.
- self.flag_eof = True
- raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
- else:
- raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
-
- if self.child_fd in r:
- try:
- s = os.read(self.child_fd, size)
- except OSError, e: # Linux does this
- self.flag_eof = True
- raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
- if s == '': # BSD style
- self.flag_eof = True
- raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
-
- if self.logfile is not None:
- self.logfile.write (s)
- self.logfile.flush()
- if self.logfile_read is not None:
- self.logfile_read.write (s)
- self.logfile_read.flush()
-
- return s
-
- raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
-
- def read (self, size = -1): # File-like object.
-
- """This reads at most "size" bytes from the file (less if the read hits
- EOF before obtaining size bytes). If the size argument is negative or
- omitted, read all data until EOF is reached. The bytes are returned as
- a string object. An empty string is returned when EOF is encountered
- immediately. """
-
- if size == 0:
- return ''
- if size < 0:
- self.expect (self.delimiter) # delimiter default is EOF
- return self.before
-
- # I could have done this more directly by not using expect(), but
- # I deliberately decided to couple read() to expect() so that
- # I would catch any bugs early and ensure consistent behavior.
- # It's a little less efficient, but there is less for me to
- # worry about if I have to later modify read() or expect().
- # Note, it's OK if size==-1 in the regex. That just means it
- # will never match anything in which case we stop only on EOF.
- cre = re.compile('.{%d}' % size, re.DOTALL)
- index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
- if index == 0:
- return self.after ### self.before should be ''. Should I assert this?
- return self.before
-
- def readline (self, size = -1): # File-like object.
-
- """This reads and returns one entire line. A trailing newline is kept
- in the string, but may be absent when a file ends with an incomplete
- line. Note: This readline() looks for a \\r\\n pair even on UNIX
- because this is what the pseudo tty device returns. So contrary to what
- you may expect you will receive the newline as \\r\\n. An empty string
- is returned when EOF is hit immediately. Currently, the size argument is
- mostly ignored, so this behavior is not standard for a file-like
- object. If size is 0 then an empty string is returned. """
-
- if size == 0:
- return ''
- index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
- if index == 0:
- return self.before + '\r\n'
- else:
- return self.before
-
- def __iter__ (self): # File-like object.
-
- """This is to support iterators over a file-like object.
- """
-
- return self
-
- def next (self): # File-like object.
-
- """This is to support iterators over a file-like object.
- """
-
- result = self.readline()
- if result == "":
- raise StopIteration
- return result
-
- def readlines (self, sizehint = -1): # File-like object.
-
- """This reads until EOF using readline() and returns a list containing
- the lines thus read. The optional "sizehint" argument is ignored. """
-
- lines = []
- while True:
- line = self.readline()
- if not line:
- break
- lines.append(line)
- return lines
-
- def write(self, s): # File-like object.
-
- """This is similar to send() except that there is no return value.
- """
-
- self.send (s)
-
- def writelines (self, sequence): # File-like object.
-
- """This calls write() for each element in the sequence. The sequence
- can be any iterable object producing strings, typically a list of
- strings. This does not add line separators. There is no return value.
- """
-
- for s in sequence:
- self.write (s)
-
- def send(self, s):
-
- """This sends a string to the child process. This returns the number of
- bytes written. If a log file was set then the data is also written to
- the log. """
-
- time.sleep(self.delaybeforesend)
- if self.logfile is not None:
- self.logfile.write (s)
- self.logfile.flush()
- if self.logfile_send is not None:
- self.logfile_send.write (s)
- self.logfile_send.flush()
- c = os.write(self.child_fd, s)
- return c
-
- def sendline(self, s=''):
-
- """This is like send(), but it adds a line feed (os.linesep). This
- returns the number of bytes written. """
-
- n = self.send(s)
- n = n + self.send (os.linesep)
- return n
-
- def sendcontrol(self, char):
-
- """This sends a control character to the child such as Ctrl-C or
- Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
-
- child.sendcontrol('g')
-
- See also, sendintr() and sendeof().
- """
-
- char = char.lower()
- a = ord(char)
- if a>=97 and a<=122:
- a = a - ord('a') + 1
- return self.send (chr(a))
- d = {'@':0, '`':0,
- '[':27, '{':27,
- '\\':28, '|':28,
- ']':29, '}': 29,
- '^':30, '~':30,
- '_':31,
- '?':127}
- if char not in d:
- return 0
- return self.send (chr(d[char]))
-
- def sendeof(self):
-
- """This sends an EOF to the child. This sends a character which causes
- the pending parent output buffer to be sent to the waiting child
- program without waiting for end-of-line. If it is the first character
- of the line, the read() in the user program returns 0, which signifies
- end-of-file. This means to work as expected a sendeof() has to be
- called at the beginning of a line. This method does not send a newline.
- It is the responsibility of the caller to ensure the eof is sent at the
- beginning of a line. """
-
- ### Hmmm... how do I send an EOF?
- ###C if ((m = write(pty, *buf, p - *buf)) < 0)
- ###C return (errno == EWOULDBLOCK) ? n : -1;
- #fd = sys.stdin.fileno()
- #old = termios.tcgetattr(fd) # remember current state
- #attr = termios.tcgetattr(fd)
- #attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
- #try: # use try/finally to ensure state gets restored
- # termios.tcsetattr(fd, termios.TCSADRAIN, attr)
- # if hasattr(termios, 'CEOF'):
- # os.write (self.child_fd, '%c' % termios.CEOF)
- # else:
- # # Silly platform does not define CEOF so assume CTRL-D
- # os.write (self.child_fd, '%c' % 4)
- #finally: # restore state
- # termios.tcsetattr(fd, termios.TCSADRAIN, old)
- if hasattr(termios, 'VEOF'):
- char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
- else:
- # platform does not define VEOF so assume CTRL-D
- char = chr(4)
- self.send(char)
-
- def sendintr(self):
-
- """This sends a SIGINT to the child. It does not require
- the SIGINT to be the first character on a line. """
-
- if hasattr(termios, 'VINTR'):
- char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
- else:
- # platform does not define VINTR so assume CTRL-C
- char = chr(3)
- self.send (char)
-
- def eof (self):
-
- """This returns True if the EOF exception was ever raised.
- """
-
- return self.flag_eof
-
- def terminate(self, force=False):
-
- """This forces a child process to terminate. It starts nicely with
- SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
- returns True if the child was terminated. This returns False if the
- child could not be terminated. """
-
- if not self.isalive():
- return True
- try:
- self.kill(signal.SIGHUP)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGCONT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- self.kill(signal.SIGINT)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- if force:
- self.kill(signal.SIGKILL)
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
- return False
- except OSError, e:
- # I think there are kernel timing issues that sometimes cause
- # this to happen. I think isalive() reports True, but the
- # process is dead to the kernel.
- # Make one last attempt to see if the kernel is up to date.
- time.sleep(self.delayafterterminate)
- if not self.isalive():
- return True
- else:
- return False
-
- def wait(self):
-
- """This waits until the child exits. This is a blocking call. This will
- not read any data from the child, so this will block forever if the
- child has unread output and has terminated. In other words, the child
- may have printed output then called exit(); but, technically, the child
- is still alive until its output is read. """
-
- if self.isalive():
- pid, status = os.waitpid(self.pid, 0)
- else:
- raise ExceptionPexpect ('Cannot wait for dead child process.')
- self.exitstatus = os.WEXITSTATUS(status)
- if os.WIFEXITED (status):
- self.status = status
- self.exitstatus = os.WEXITSTATUS(status)
- self.signalstatus = None
- self.terminated = True
- elif os.WIFSIGNALED (status):
- self.status = status
- self.exitstatus = None
- self.signalstatus = os.WTERMSIG(status)
- self.terminated = True
- elif os.WIFSTOPPED (status):
- raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
- return self.exitstatus
-
- def isalive(self):
-
- """This tests if the child process is running or not. This is
- non-blocking. If the child was terminated then this will read the
- exitstatus or signalstatus of the child. This returns True if the child
- process appears to be running or False if not. It can take literally
- SECONDS for Solaris to return the right status. """
-
- if self.terminated:
- return False
-
- if self.flag_eof:
- # This is for Linux, which requires the blocking form of waitpid to get
- # status of a defunct process. This is super-lame. The flag_eof would have
- # been set in read_nonblocking(), so this should be safe.
- waitpid_options = 0
- else:
- waitpid_options = os.WNOHANG
-
- try:
- pid, status = os.waitpid(self.pid, waitpid_options)
- except OSError, e: # No child processes
- if e[0] == errno.ECHILD:
- raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
- else:
- raise e
-
- # I have to do this twice for Solaris. I can't even believe that I figured this out...
- # If waitpid() returns 0 it means that no child process wishes to
- # report, and the value of status is undefined.
- if pid == 0:
- try:
- pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
- except OSError, e: # This should never happen...
- if e[0] == errno.ECHILD:
- raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
- else:
- raise e
-
- # If pid is still 0 after two calls to waitpid() then
- # the process really is alive. This seems to work on all platforms, except
- # for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
- # take care of this situation (unfortunately, this requires waiting through the timeout).
- if pid == 0:
- return True
-
- if pid == 0:
- return True
-
- if os.WIFEXITED (status):
- self.status = status
- self.exitstatus = os.WEXITSTATUS(status)
- self.signalstatus = None
- self.terminated = True
- elif os.WIFSIGNALED (status):
- self.status = status
- self.exitstatus = None
- self.signalstatus = os.WTERMSIG(status)
- self.terminated = True
- elif os.WIFSTOPPED (status):
- raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
- return False
-
- def kill(self, sig):
-
- """This sends the given signal to the child application. In keeping
- with UNIX tradition it has a misleading name. It does not necessarily
- kill the child unless you send the right signal. """
-
- # Same as os.kill, but the pid is given for you.
- if self.isalive():
- os.kill(self.pid, sig)
-
- def compile_pattern_list(self, patterns):
-
- """This compiles a pattern-string or a list of pattern-strings.
- Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
- those. Patterns may also be None which results in an empty list (you
- might do this if waiting for an EOF or TIMEOUT condition without
- expecting any pattern).
-
- This is used by expect() when calling expect_list(). Thus expect() is
- nothing more than::
-
- cpl = self.compile_pattern_list(pl)
- return self.expect_list(cpl, timeout)
-
- If you are using expect() within a loop it may be more
- efficient to compile the patterns first and then call expect_list().
- This avoids calls in a loop to compile_pattern_list()::
-
- cpl = self.compile_pattern_list(my_pattern)
- while some_condition:
- ...
- i = self.expect_list(cpl, timeout)
- ...
- """
-
- if patterns is None:
- return []
- if type(patterns) is not types.ListType:
- patterns = [patterns]
-
- compile_flags = re.DOTALL # Allow dot to match \n
- if self.ignorecase:
- compile_flags = compile_flags | re.IGNORECASE
- compiled_pattern_list = []
- for p in patterns:
- if type(p) in types.StringTypes:
- compiled_pattern_list.append(re.compile(p, compile_flags))
- elif p is EOF:
- compiled_pattern_list.append(EOF)
- elif p is TIMEOUT:
- compiled_pattern_list.append(TIMEOUT)
- elif type(p) is type(re.compile('')):
- compiled_pattern_list.append(p)
- else:
- raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those type. %s' % str(type(p)))
-
- return compiled_pattern_list
-
- def expect(self, pattern, timeout = -1, searchwindowsize=None):
-
- """This seeks through the stream until a pattern is matched. The
- pattern is overloaded and may take several types. The pattern can be a
- StringType, EOF, a compiled re, or a list of any of those types.
- Strings will be compiled to re types. This returns the index into the
- pattern list. If the pattern was not a list this returns index 0 on a
- successful match. This may raise exceptions for EOF or TIMEOUT. To
- avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
- list. That will cause expect to match an EOF or TIMEOUT condition
- instead of raising an exception.
-
- If you pass a list of patterns and more than one matches, the first match
- in the stream is chosen. If more than one pattern matches at that point,
- the leftmost in the pattern list is chosen. For example::
-
- # the input is 'foobar'
- index = p.expect (['bar', 'foo', 'foobar'])
- # returns 1 ('foo') even though 'foobar' is a "better" match
-
- Please note, however, that buffering can affect this behavior, since
- input arrives in unpredictable chunks. For example::
-
- # the input is 'foobar'
- index = p.expect (['foobar', 'foo'])
- # returns 0 ('foobar') if all input is available at once,
- # but returns 1 ('foo') if parts of the final 'bar' arrive late
-
- After a match is found the instance attributes 'before', 'after' and
- 'match' will be set. You can see all the data read before the match in
- 'before'. You can see the data that was matched in 'after'. The
- re.MatchObject used in the re match will be in 'match'. If an error
- occurred then 'before' will be set to all the data read so far and
- 'after' and 'match' will be None.
-
- If timeout is -1 then timeout will be set to the self.timeout value.
-
- A list entry may be EOF or TIMEOUT instead of a string. This will
- catch these exceptions and return the index of the list entry instead
- of raising the exception. The attribute 'after' will be set to the
- exception type. The attribute 'match' will be None. This allows you to
- write code like this::
-
- index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
- if index == 0:
- do_something()
- elif index == 1:
- do_something_else()
- elif index == 2:
- do_some_other_thing()
- elif index == 3:
- do_something_completely_different()
-
- instead of code like this::
-
- try:
- index = p.expect (['good', 'bad'])
- if index == 0:
- do_something()
- elif index == 1:
- do_something_else()
- except EOF:
- do_some_other_thing()
- except TIMEOUT:
- do_something_completely_different()
-
- These two forms are equivalent. It all depends on what you want. You
- can also just expect the EOF if you are waiting for all output of a
- child to finish. For example::
-
- p = pexpect.spawn('/bin/ls')
- p.expect (pexpect.EOF)
- print p.before
-
- If you are trying to optimize for speed then see expect_list().
- """
-
- compiled_pattern_list = self.compile_pattern_list(pattern)
- return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
-
- def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
- """This takes a list of compiled regular expressions and returns the
- index into the pattern_list that matched the child output. The list may
- also contain EOF or TIMEOUT (which are not compiled regular
- expressions). This method is similar to the expect() method except that
- expect_list() does not recompile the pattern list on every call. This
- may help if you are trying to optimize for speed, otherwise just use
- the expect() method. This is called by expect(). If timeout==-1 then
- the self.timeout value is used. If searchwindowsize==-1 then the
- self.searchwindowsize value is used. """
-
- return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
-
- def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
-
- """This is similar to expect(), but uses plain string matching instead
- of compiled regular expressions in 'pattern_list'. The 'pattern_list'
- may be a string; a list or other sequence of strings; or TIMEOUT and
- EOF.
-
- This call might be faster than expect() for two reasons: string
- searching is faster than RE matching and it is possible to limit the
- search to just the end of the input buffer.
-
- This method is also useful when you don't want to have to worry about
- escaping regular expression characters that you want to match."""
-
- if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
- pattern_list = [pattern_list]
- return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
-
- def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
-
- """This is the common loop used inside expect. The 'searcher' should be
- an instance of searcher_re or searcher_string, which describes how and what
- to search for in the input.
-
- See expect() for other arguments, return value and exceptions. """
-
- self.searcher = searcher
-
- if timeout == -1:
- timeout = self.timeout
- if timeout is not None:
- end_time = time.time() + timeout
- if searchwindowsize == -1:
- searchwindowsize = self.searchwindowsize
-
- try:
- incoming = self.buffer
- freshlen = len(incoming)
- while True: # Keep reading until exception or return.
- index = searcher.search(incoming, freshlen, searchwindowsize)
- if index >= 0:
- self.buffer = incoming[searcher.end : ]
- self.before = incoming[ : searcher.start]
- self.after = incoming[searcher.start : searcher.end]
- self.match = searcher.match
- self.match_index = index
- return self.match_index
- # No match at this point
- if timeout < 0 and timeout is not None:
- raise TIMEOUT ('Timeout exceeded in expect_any().')
- # Still have time left, so read more data
- c = self.read_nonblocking (self.maxread, timeout)
- freshlen = len(c)
- time.sleep (0.0001)
- incoming = incoming + c
- if timeout is not None:
- timeout = end_time - time.time()
- except EOF, e:
- self.buffer = ''
- self.before = incoming
- self.after = EOF
- index = searcher.eof_index
- if index >= 0:
- self.match = EOF
- self.match_index = index
- return self.match_index
- else:
- self.match = None
- self.match_index = None
- raise EOF (str(e) + '\n' + str(self))
- except TIMEOUT, e:
- self.buffer = incoming
- self.before = incoming
- self.after = TIMEOUT
- index = searcher.timeout_index
- if index >= 0:
- self.match = TIMEOUT
- self.match_index = index
- return self.match_index
- else:
- self.match = None
- self.match_index = None
- raise TIMEOUT (str(e) + '\n' + str(self))
- except:
- self.before = incoming
- self.after = None
- self.match = None
- self.match_index = None
- raise
-
- def getwinsize(self):
-
- """This returns the terminal window size of the child tty. The return
- value is a tuple of (rows, cols). """
-
- TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
- s = struct.pack('HHHH', 0, 0, 0, 0)
- x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
- return struct.unpack('HHHH', x)[0:2]
-
- def setwinsize(self, r, c):
-
- """This sets the terminal window size of the child tty. This will cause
- a SIGWINCH signal to be sent to the child. This does not change the
- physical window size. It changes the size reported to TTY-aware
- applications like vi or curses -- applications that respond to the
- SIGWINCH signal. """
-
- # Check for buggy platforms. Some Python versions on some platforms
- # (notably OSF1 Alpha and RedHat 7.1) truncate the value for
- # termios.TIOCSWINSZ. It is not clear why this happens.
- # These platforms don't seem to handle the signed int very well;
- # yet other platforms like OpenBSD have a large negative value for
- # TIOCSWINSZ and they don't have a truncate problem.
- # Newer versions of Linux have totally different values for TIOCSWINSZ.
- # Note that this fix is a hack.
- TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
- if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
- TIOCSWINSZ = -2146929561 # Same bits, but with sign.
- # Note, assume ws_xpixel and ws_ypixel are zero.
- s = struct.pack('HHHH', r, c, 0, 0)
- fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
-
- def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
-
- """This gives control of the child process to the interactive user (the
- human at the keyboard). Keystrokes are sent to the child process, and
- the stdout and stderr output of the child process is printed. This
- simply echoes the child stdout and child stderr to the real stdout and
- it echoes the real stdin to the child stdin. When the user types the
- escape_character this method will stop. The default for
- escape_character is ^]. This should not be confused with ASCII 27 --
- the ESC character. ASCII 29 was chosen for historical merit because
- this is the character used by 'telnet' as the escape character. The
- escape_character will not be sent to the child process.
-
- You may pass in optional input and output filter functions. These
- functions should take a string and return a string. The output_filter
- will be passed all the output from the child process. The input_filter
- will be passed all the keyboard input from the user. The input_filter
- is run BEFORE the check for the escape_character.
-
- Note that if you change the window size of the parent the SIGWINCH
- signal will not be passed through to the child. If you want the child
- window size to change when the parent's window size changes then do
- something like the following example::
-
- import pexpect, struct, fcntl, termios, signal, sys
- def sigwinch_passthrough (sig, data):
- s = struct.pack("HHHH", 0, 0, 0, 0)
- a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
- global p
- p.setwinsize(a[0],a[1])
- p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
- signal.signal(signal.SIGWINCH, sigwinch_passthrough)
- p.interact()
- """
-
- # Flush the buffer.
- self.stdout.write (self.buffer)
- self.stdout.flush()
- self.buffer = ''
- mode = tty.tcgetattr(self.STDIN_FILENO)
- tty.setraw(self.STDIN_FILENO)
- try:
- self.__interact_copy(escape_character, input_filter, output_filter)
- finally:
- tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
-
- def __interact_writen(self, fd, data):
-
- """This is used by the interact() method.
- """
-
- while data != '' and self.isalive():
- n = os.write(fd, data)
- data = data[n:]
-
- def __interact_read(self, fd):
-
- """This is used by the interact() method.
- """
-
- return os.read(fd, 1000)
-
- def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
-
- """This is used by the interact() method.
- """
-
- while self.isalive():
- r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
- if self.child_fd in r:
- data = self.__interact_read(self.child_fd)
- if output_filter: data = output_filter(data)
- if self.logfile is not None:
- self.logfile.write (data)
- self.logfile.flush()
- os.write(self.STDOUT_FILENO, data)
- if self.STDIN_FILENO in r:
- data = self.__interact_read(self.STDIN_FILENO)
- if input_filter: data = input_filter(data)
- i = data.rfind(escape_character)
- if i != -1:
- data = data[:i]
- self.__interact_writen(self.child_fd, data)
- break
- self.__interact_writen(self.child_fd, data)
-
- def __select (self, iwtd, owtd, ewtd, timeout=None):
-
- """This is a wrapper around select.select() that ignores signals. If
- select.select raises a select.error exception and errno is an EINTR
- error then it is ignored. Mainly this is used to ignore sigwinch
- (terminal resize). """
-
- # if select() is interrupted by a signal (errno==EINTR) then
- # we loop back and enter the select() again.
- if timeout is not None:
- end_time = time.time() + timeout
- while True:
- try:
- return select.select (iwtd, owtd, ewtd, timeout)
- except select.error, e:
- if e[0] == errno.EINTR:
- # if we loop back we have to subtract the amount of time we already waited.
- if timeout is not None:
- timeout = end_time - time.time()
- if timeout < 0:
- return ([],[],[])
- else: # something else caused the select.error, so this really is an exception
- raise
-
-##############################################################################
-# The following methods are no longer supported or allowed.
-
- def setmaxread (self, maxread):
-
- """This method is no longer supported or allowed. I don't like getters
- and setters without a good reason. """
-
- raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
-
- def setlog (self, fileobject):
-
- """This method is no longer supported or allowed.
- """
-
- raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
-
-##############################################################################
-# End of spawn class
-##############################################################################
-
-class searcher_string (object):
-
- """This is a plain string search helper for the spawn.expect_any() method.
-
- Attributes:
-
- eof_index - index of EOF, or -1
- timeout_index - index of TIMEOUT, or -1
-
- After a successful match by the search() method the following attributes
- are available:
-
- start - index into the buffer, first byte of match
- end - index into the buffer, first byte after match
- match - the matching string itself
- """
-
- def __init__(self, strings):
-
- """This creates an instance of searcher_string. This argument 'strings'
- may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
-
- self.eof_index = -1
- self.timeout_index = -1
- self._strings = []
- for n, s in zip(range(len(strings)), strings):
- if s is EOF:
- self.eof_index = n
- continue
- if s is TIMEOUT:
- self.timeout_index = n
- continue
- self._strings.append((n, s))
-
- def __str__(self):
-
- """This returns a human-readable string that represents the state of
- the object."""
-
- ss = [ (ns[0],' %d: "%s"' % ns) for ns in self._strings ]
- ss.append((-1,'searcher_string:'))
- if self.eof_index >= 0:
- ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
- if self.timeout_index >= 0:
- ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
- ss.sort()
- ss = zip(*ss)[1]
- return '\n'.join(ss)
-
- def search(self, buffer, freshlen, searchwindowsize=None):
-
- """This searches 'buffer' for the first occurrence of one of the search
- strings. 'freshlen' must indicate the number of bytes at the end of
- 'buffer' which have not been searched before. It helps to avoid
- searching the same, possibly big, buffer over and over again.
-
- See class spawn for the 'searchwindowsize' argument.
-
- If there is a match this returns the index of that string, and sets
- 'start', 'end' and 'match'. Otherwise, this returns -1. """
-
- absurd_match = len(buffer)
- first_match = absurd_match
-
- # 'freshlen' helps a lot here. Further optimizations could
- # possibly include:
- #
- # using something like the Boyer-Moore Fast String Searching
- # Algorithm; pre-compiling the search through a list of
- # strings into something that can scan the input once to
- # search for all N strings; realize that if we search for
- # ['bar', 'baz'] and the input is '...foo' we need not bother
- # rescanning until we've read three more bytes.
- #
- # Sadly, I don't know enough about this interesting topic. /grahn
-
- for index, s in self._strings:
- if searchwindowsize is None:
- # the match, if any, can only be in the fresh data,
- # or at the very end of the old data
- offset = -(freshlen+len(s))
- else:
- # better obey searchwindowsize
- offset = -searchwindowsize
- n = buffer.find(s, offset)
- if n >= 0 and n < first_match:
- first_match = n
- best_index, best_match = index, s
- if first_match == absurd_match:
- return -1
- self.match = best_match
- self.start = first_match
- self.end = self.start + len(self.match)
- return best_index
-
-class searcher_re (object):
-
- """This is a regular expression string search helper for the
- spawn.expect_any() method.
-
- Attributes:
-
- eof_index - index of EOF, or -1
- timeout_index - index of TIMEOUT, or -1
-
- After a successful match by the search() method the following attributes
- are available:
-
- start - index into the buffer, first byte of match
- end - index into the buffer, first byte after match
- match - the re.match object returned by a successful re.search
-
- """
-
- def __init__(self, patterns):
-
- """This creates an instance that searches for 'patterns' Where
- 'patterns' may be a list or other sequence of compiled regular
- expressions, or the EOF or TIMEOUT types."""
-
- self.eof_index = -1
- self.timeout_index = -1
- self._searches = []
- for n, s in zip(range(len(patterns)), patterns):
- if s is EOF:
- self.eof_index = n
- continue
- if s is TIMEOUT:
- self.timeout_index = n
- continue
- self._searches.append((n, s))
-
- def __str__(self):
-
- """This returns a human-readable string that represents the state of
- the object."""
-
- ss = [ (n,' %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
- ss.append((-1,'searcher_re:'))
- if self.eof_index >= 0:
- ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
- if self.timeout_index >= 0:
- ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
- ss.sort()
- ss = zip(*ss)[1]
- return '\n'.join(ss)
-
- def search(self, buffer, freshlen, searchwindowsize=None):
-
- """This searches 'buffer' for the first occurrence of one of the regular
- expressions. 'freshlen' must indicate the number of bytes at the end of
- 'buffer' which have not been searched before.
-
- See class spawn for the 'searchwindowsize' argument.
-
- If there is a match this returns the index of that string, and sets
- 'start', 'end' and 'match'. Otherwise, returns -1."""
-
- absurd_match = len(buffer)
- first_match = absurd_match
- # 'freshlen' doesn't help here -- we cannot predict the
- # length of a match, and the re module provides no help.
- if searchwindowsize is None:
- searchstart = 0
- else:
- searchstart = max(0, len(buffer)-searchwindowsize)
- for index, s in self._searches:
- match = s.search(buffer, searchstart)
- if match is None:
- continue
- n = match.start()
- if n < first_match:
- first_match = n
- the_match = match
- best_index = index
- if first_match == absurd_match:
- return -1
- self.start = first_match
- self.match = the_match
- self.end = self.match.end()
- return best_index
-
-def which (filename):
-
- """This takes a given filename; tries to find it in the environment path;
- then checks if it is executable. This returns the full path to the filename
- if found and executable. Otherwise this returns None."""
-
- # Special case where filename already contains a path.
- if os.path.dirname(filename) != '':
- if os.access (filename, os.X_OK):
- return filename
-
- if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
- p = os.defpath
- else:
- p = os.environ['PATH']
-
- # Oddly enough this was the one line that made Pexpect
- # incompatible with Python 1.5.2.
- #pathlist = p.split (os.pathsep)
- pathlist = string.split (p, os.pathsep)
-
- for path in pathlist:
- f = os.path.join(path, filename)
- if os.access(f, os.X_OK):
- return f
- return None
-
-def split_command_line(command_line):
-
- """This splits a command line into a list of arguments. It splits arguments
- on spaces, but handles embedded quotes, doublequotes, and escaped
- characters. It's impossible to do this with a regular expression, so I
- wrote a little state machine to parse the command line. """
-
- arg_list = []
- arg = ''
-
- # Constants to name the states we can be in.
- state_basic = 0
- state_esc = 1
- state_singlequote = 2
- state_doublequote = 3
- state_whitespace = 4 # The state of consuming whitespace between commands.
- state = state_basic
-
- for c in command_line:
- if state == state_basic or state == state_whitespace:
- if c == '\\': # Escape the next character
- state = state_esc
- elif c == r"'": # Handle single quote
- state = state_singlequote
- elif c == r'"': # Handle double quote
- state = state_doublequote
- elif c.isspace():
- # Add arg to arg_list if we aren't in the middle of whitespace.
- if state == state_whitespace:
- None # Do nothing.
- else:
- arg_list.append(arg)
- arg = ''
- state = state_whitespace
- else:
- arg = arg + c
- state = state_basic
- elif state == state_esc:
- arg = arg + c
- state = state_basic
- elif state == state_singlequote:
- if c == r"'":
- state = state_basic
- else:
- arg = arg + c
- elif state == state_doublequote:
- if c == r'"':
- state = state_basic
- else:
- arg = arg + c
-
- if arg != '':
- arg_list.append(arg)
- return arg_list
-
-# vi:ts=4:sw=4:expandtab:ft=python:
https://bitbucket.org/yt_analysis/yt/changeset/6a5826b13864/
changeset: 6a5826b13864
branch: yt
user: MatthewTurk
date: 2011-11-23 22:26:00
summary: Few more fixes, removing cmdln.py
affected #: 4 files
diff -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a -r 6a5826b1386431aa99452f9fb5edd666cf4ed655 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -421,7 +421,7 @@
class YTEmptyClass(object):
pass
-def _update_hg(path, skip_rebuild = False):
+def update_hg(path, skip_rebuild = False):
from mercurial import hg, ui, commands
f = open(os.path.join(path, "yt_updater.log"), "a")
u = ui.ui()
@@ -459,7 +459,7 @@
f.write("Successful!\n")
print "Updated successfully."
-def _get_hg_version(path):
+def get_hg_version(path):
from mercurial import hg, ui, commands
u = ui.ui()
u.pushbuffer()
@@ -467,7 +467,7 @@
commands.identify(u, repo)
return u.popbuffer()
-def _get_yt_version():
+def get_yt_version():
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
@@ -475,7 +475,7 @@
return version
# This code snippet is modified from Georg Brandl
-def _bb_apicall(endpoint, data, use_pass = True):
+def bb_apicall(endpoint, data, use_pass = True):
import urllib, urllib2
uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
# since bitbucket doesn't return the required WWW-Authenticate header when
@@ -491,7 +491,7 @@
req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
return urllib2.urlopen(req).read()
-def _get_yt_supp():
+def get_yt_supp():
supp_path = os.path.join(os.environ["YT_DEST"], "src",
"yt-supplemental")
# Now we check that the supplemental repository is checked out.
diff -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a -r 6a5826b1386431aa99452f9fb5edd666cf4ed655 yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -238,8 +238,8 @@
import pkg_resources
yt_provider = pkg_resources.get_provider("yt")
path = os.path.dirname(yt_provider.module_path)
- from yt.utilities.command_line import _get_hg_version
- comparison = _get_hg_version(path)[:12]
+ from yt.funcs import get_hg_version
+ comparison = get_hg_version(path)[:12]
print "Setting comparison to: %s" % (comparison)
sys.path.insert(0, ".")
self._load_modules(test_modules)
diff -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a -r 6a5826b1386431aa99452f9fb5edd666cf4ed655 yt/utilities/cmdln.py
--- a/yt/utilities/cmdln.py
+++ /dev/null
@@ -1,1586 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2002-2007 ActiveState Software Inc.
-# License: MIT (see LICENSE.txt for license details)
-# Author: Trent Mick
-# Home: http://trentm.com/projects/cmdln/
-
-"""An improvement on Python's standard cmd.py module.
-
-As with cmd.py, this module provides "a simple framework for writing
-line-oriented command interpreters." This module provides a 'RawCmdln'
-class that fixes some design flaws in cmd.Cmd, making it more scalable
-and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
-or simple shells. And it provides a 'Cmdln' class that adds
-optparse-based option processing. Basically you use it like this:
-
- import cmdln
-
- class MySVN(cmdln.Cmdln):
- name = "svn"
-
- @cmdln.alias('stat', 'st')
- @cmdln.option('-v', '--verbose', action='store_true'
- help='print verbose information')
- def do_status(self, subcmd, opts, *paths):
- print "handle 'svn status' command"
-
- #...
-
- if __name__ == "__main__":
- shell = MySVN()
- retval = shell.main()
- sys.exit(retval)
-
-See the README.txt or <http://trentm.com/projects/cmdln/> for more
-details.
-"""
-
-__version_info__ = (1, 1, 2)
-__version__ = '.'.join(map(str, __version_info__))
-
-import os
-import sys
-import re
-import cmd
-import optparse
-from pprint import pprint
-import sys
-
-
-
-
-#---- globals
-
-LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
-
-# An unspecified optional argument when None is a meaningful value.
-_NOT_SPECIFIED = ("Not", "Specified")
-
-# Pattern to match a TypeError message from a call that
-# failed because of incorrect number of arguments (see
-# Python/getargs.c).
-_INCORRECT_NUM_ARGS_RE = re.compile(
- r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
-
-
-
-#---- exceptions
-
-class CmdlnError(Exception):
- """A cmdln.py usage error."""
- def __init__(self, msg):
- self.msg = msg
- def __str__(self):
- return self.msg
-
-class CmdlnUserError(Exception):
- """An error by a user of a cmdln-based tool/shell."""
- pass
-
-
-
-#---- public methods and classes
-
-def alias(*aliases):
- """Decorator to add aliases for Cmdln.do_* command handlers.
-
- Example:
- class MyShell(cmdln.Cmdln):
- @cmdln.alias("!", "sh")
- def do_shell(self, argv):
- #...implement 'shell' command
- """
- def decorate(f):
- if not hasattr(f, "aliases"):
- f.aliases = []
- f.aliases += aliases
- return f
- return decorate
-
-
-class RawCmdln(cmd.Cmd):
- """An improved (on cmd.Cmd) framework for building multi-subcommand
- scripts (think "svn" & "cvs") and simple shells (think "pdb" and
- "gdb").
-
- A simple example:
-
- import cmdln
-
- class MySVN(cmdln.RawCmdln):
- name = "svn"
-
- @cmdln.aliases('stat', 'st')
- def do_status(self, argv):
- print "handle 'svn status' command"
-
- if __name__ == "__main__":
- shell = MySVN()
- retval = shell.main()
- sys.exit(retval)
-
- See <http://trentm.com/projects/cmdln> for more information.
- """
- name = None # if unset, defaults basename(sys.argv[0])
- prompt = None # if unset, defaults to self.name+"> "
- version = None # if set, default top-level options include --version
-
- # Default messages for some 'help' command error cases.
- # They are interpolated with one arg: the command.
- nohelp = "no help on '%s'"
- unknowncmd = "unknown command: '%s'"
-
- helpindent = '' # string with which to indent help output
-
- def __init__(self, completekey='tab',
- stdin=None, stdout=None, stderr=None):
- """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
-
- The optional argument 'completekey' is the readline name of a
- completion key; it defaults to the Tab key. If completekey is
- not None and the readline module is available, command completion
- is done automatically.
-
- The optional arguments 'stdin', 'stdout' and 'stderr' specify
- alternate input, output and error output file objects; if not
- specified, sys.* are used.
-
- If 'stdout' but not 'stderr' is specified, stdout is used for
- error output. This is to provide least surprise for users used
- to only the 'stdin' and 'stdout' options with cmd.Cmd.
- """
- import sys
- if self.name is None:
- self.name = os.path.basename(sys.argv[0])
- if self.prompt is None:
- self.prompt = self.name+"> "
- self._name_str = self._str(self.name)
- self._prompt_str = self._str(self.prompt)
- if stdin is not None:
- self.stdin = stdin
- else:
- self.stdin = sys.stdin
- if stdout is not None:
- self.stdout = stdout
- else:
- self.stdout = sys.stdout
- if stderr is not None:
- self.stderr = stderr
- elif stdout is not None:
- self.stderr = stdout
- else:
- self.stderr = sys.stderr
- self.cmdqueue = []
- self.completekey = completekey
- self.cmdlooping = False
-
- def get_optparser(self):
- """Hook for subclasses to set the option parser for the
- top-level command/shell.
-
- This option parser is retrieved and used by `.main()' to
- handle top-level options.
-
- The default implements a single '-h|--help' option. Sub-classes
- can return None to have no options at the top-level. Typically
- an instance of CmdlnOptionParser should be returned.
- """
- version = (self.version is not None
- and "%s %s" % (self._name_str, self.version)
- or None)
- return CmdlnOptionParser(self, version=version)
-
- def postoptparse(self):
- """Hook method executed just after `.main()' parses top-level
- options.
-
- When called `self.options' holds the results of the option parse.
- """
- pass
-
- def main(self, argv=None, loop=LOOP_NEVER):
- """A possible mainline handler for a script, like so:
-
- import cmdln
- class MyCmd(cmdln.Cmdln):
- name = "mycmd"
- ...
-
- if __name__ == "__main__":
- MyCmd().main()
-
- By default this will use sys.argv to issue a single command to
- 'MyCmd', then exit. The 'loop' argument can be used to control
- interactive shell behaviour.
-
- Arguments:
- "argv" (optional, default sys.argv) is the command to run.
- It must be a sequence, where the first element is the
- command name and subsequent elements the args for that
- command.
- "loop" (optional, default LOOP_NEVER) is a constant
- indicating if a command loop should be started (i.e. an
- interactive shell). Valid values (constants on this module):
- LOOP_ALWAYS start loop and run "argv", if any
- LOOP_NEVER run "argv" (or .emptyline()) and exit
- LOOP_IF_EMPTY run "argv", if given, and exit;
- otherwise, start loop
- """
- if argv is None:
- import sys
- argv = sys.argv
- else:
- argv = argv[:] # don't modify caller's list
-
- self.optparser = self.get_optparser()
- if self.optparser: # i.e. optparser=None means don't process for opts
- try:
- self.options, args = self.optparser.parse_args(argv[1:])
- except CmdlnUserError, ex:
- msg = "%s: %s\nTry '%s help' for info.\n"\
- % (self.name, ex, self.name)
- self.stderr.write(self._str(msg))
- self.stderr.flush()
- return 1
- except StopOptionProcessing, ex:
- return 0
- else:
- self.options, args = None, argv[1:]
- self.postoptparse()
-
- if loop == LOOP_ALWAYS:
- if args:
- self.cmdqueue.append(args)
- return self.cmdloop()
- elif loop == LOOP_NEVER:
- if args:
- return self.cmd(args)
- else:
- return self.emptyline()
- elif loop == LOOP_IF_EMPTY:
- if args:
- return self.cmd(args)
- else:
- return self.cmdloop()
-
- def cmd(self, argv):
- """Run one command and exit.
-
- "argv" is the arglist for the command to run. argv[0] is the
- command to run. If argv is an empty list then the
- 'emptyline' handler is run.
-
- Returns the return value from the command handler.
- """
- assert isinstance(argv, (list, tuple)), \
- "'argv' is not a sequence: %r" % argv
- retval = None
- try:
- argv = self.precmd(argv)
- retval = self.onecmd(argv)
- self.postcmd(argv)
- except:
- if not self.cmdexc(argv):
- raise
- retval = 1
- return retval
-
- def _str(self, s):
- """Safely convert the given str/unicode to a string for printing."""
- try:
- return str(s)
- except UnicodeError:
- #XXX What is the proper encoding to use here? 'utf-8' seems
- # to work better than "getdefaultencoding" (usually
- # 'ascii'), on OS X at least.
- #import sys
- #return s.encode(sys.getdefaultencoding(), "replace")
- return s.encode("utf-8", "replace")
-
- def cmdloop(self, intro=None):
- """Repeatedly issue a prompt, accept input, parse into an argv, and
- dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
- the argv. In other words, start a shell.
-
- "intro" (optional) is an introductory message to print when
- starting the command loop. This overrides the class
- "intro" attribute, if any.
- """
- self.cmdlooping = True
- self.preloop()
- if self.use_rawinput and self.completekey:
- try:
- import readline
- self.old_completer = readline.get_completer()
- readline.set_completer(self.complete)
- readline.parse_and_bind(self.completekey+": complete")
- except ImportError:
- pass
- try:
- if intro is None:
- intro = self.intro
- if intro:
- intro_str = self._str(intro)
- self.stdout.write(intro_str+'\n')
- self.stop = False
- retval = None
- while not self.stop:
- if self.cmdqueue:
- argv = self.cmdqueue.pop(0)
- assert isinstance(argv, (list, tuple)), \
- "item on 'cmdqueue' is not a sequence: %r" % argv
- else:
- if self.use_rawinput:
- try:
- line = raw_input(self._prompt_str)
- except EOFError:
- line = 'EOF'
- else:
- self.stdout.write(self._prompt_str)
- self.stdout.flush()
- line = self.stdin.readline()
- if not len(line):
- line = 'EOF'
- else:
- line = line[:-1] # chop '\n'
- argv = line2argv(line)
- try:
- argv = self.precmd(argv)
- retval = self.onecmd(argv)
- self.postcmd(argv)
- except:
- if not self.cmdexc(argv):
- raise
- retval = 1
- self.lastretval = retval
- self.postloop()
- finally:
- if self.use_rawinput and self.completekey:
- try:
- import readline
- readline.set_completer(self.old_completer)
- except ImportError:
- pass
- self.cmdlooping = False
- return retval
-
- def precmd(self, argv):
- """Hook method executed just before the command argv is
- interpreted, but after the input prompt is generated and issued.
-
- "argv" is the cmd to run.
-
- Returns an argv to run (i.e. this method can modify the command
- to run).
- """
- return argv
-
- def postcmd(self, argv):
- """Hook method executed just after a command dispatch is finished.
-
- "argv" is the command that was run.
- """
- pass
-
- def cmdexc(self, argv):
- """Called if an exception is raised in any of precmd(), onecmd(),
- or postcmd(). If True is returned, the exception is deemed to have
- been dealt with. Otherwise, the exception is re-raised.
-
- The default implementation handles CmdlnUserError's, which
- typically correspond to user error in calling commands (as
- opposed to programmer error in the design of the script using
- cmdln.py).
- """
- import sys
- type, exc, traceback = sys.exc_info()
- if isinstance(exc, CmdlnUserError):
- msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
- % (self.name, argv[0], exc, self.name, argv[0])
- self.stderr.write(self._str(msg))
- self.stderr.flush()
- return True
-
- def onecmd(self, argv):
- if not argv:
- return self.emptyline()
- self.lastcmd = argv
- cmdname = self._get_canonical_cmd_name(argv[0])
- if cmdname:
- handler = self._get_cmd_handler(cmdname)
- if handler:
- return self._dispatch_cmd(handler, argv)
- return self.default(argv)
-
- def _dispatch_cmd(self, handler, argv):
- return handler(argv)
-
- def default(self, argv):
- """Hook called to handle a command for which there is no handler.
-
- "argv" is the command and arguments to run.
-
- The default implementation writes an error message to stderr
- and returns an error exit status.
-
- Returns a numeric command exit status.
- """
- errmsg = self._str(self.unknowncmd % (argv[0],))
- if self.cmdlooping:
- self.stderr.write(errmsg+"\n")
- else:
- self.stderr.write("%s: %s\nTry '%s help' for info.\n"
- % (self._name_str, errmsg, self._name_str))
- self.stderr.flush()
- return 1
-
- def parseline(self, line):
- # This is used by Cmd.complete (readline completer function) to
- # massage the current line buffer before completion processing.
- # We override to drop special '!' handling.
- line = line.strip()
- if not line:
- return None, None, line
- elif line[0] == '?':
- line = 'help ' + line[1:]
- i, n = 0, len(line)
- while i < n and line[i] in self.identchars: i = i+1
- cmd, arg = line[:i], line[i:].strip()
- return cmd, arg, line
-
- def helpdefault(self, cmd, known):
- """Hook called to handle help on a command for which there is no
- help handler.
-
- "cmd" is the command name on which help was requested.
- "known" is a boolean indicating if this command is known
- (i.e. if there is a handler for it).
-
- Returns a return code.
- """
- if known:
- msg = self._str(self.nohelp % (cmd,))
- if self.cmdlooping:
- self.stderr.write(msg + '\n')
- else:
- self.stderr.write("%s: %s\n" % (self.name, msg))
- else:
- msg = self.unknowncmd % (cmd,)
- if self.cmdlooping:
- self.stderr.write(msg + '\n')
- else:
- self.stderr.write("%s: %s\n"
- "Try '%s help' for info.\n"
- % (self.name, msg, self.name))
- self.stderr.flush()
- return 1
-
- def do_help(self, argv):
- """${cmd_name}: give detailed help on a specific sub-command
-
- Usage:
- ${name} help [COMMAND]
- """
- if len(argv) > 1: # asking for help on a particular command
- doc = None
- cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
- if not cmdname:
- return self.helpdefault(argv[1], False)
- else:
- helpfunc = getattr(self, "help_"+cmdname, None)
- if helpfunc:
- doc = helpfunc()
- else:
- handler = self._get_cmd_handler(cmdname)
- if handler:
- doc = handler.__doc__
- if doc is None:
- return self.helpdefault(argv[1], handler != None)
- else: # bare "help" command
- doc = self.__class__.__doc__ # try class docstring
- if doc is None:
- # Try to provide some reasonable useful default help.
- if self.cmdlooping: prefix = ""
- else: prefix = self.name+' '
- doc = """Usage:
- %sCOMMAND [ARGS...]
- %shelp [COMMAND]
-
- ${option_list}
- ${command_list}
- ${help_list}
- """ % (prefix, prefix)
- cmdname = None
-
- if doc: # *do* have help content, massage and print that
- doc = self._help_reindent(doc)
- doc = self._help_preprocess(doc, cmdname)
- doc = doc.rstrip() + '\n' # trim down trailing space
- self.stdout.write(self._str(doc))
- self.stdout.flush()
- do_help.aliases = ["?"]
-
- def _help_reindent(self, help, indent=None):
- """Hook to re-indent help strings before writing to stdout.
-
- "help" is the help content to re-indent
- "indent" is a string with which to indent each line of the
- help content after normalizing. If unspecified or None
- then the default is use: the 'self.helpindent' class
- attribute. By default this is the empty string, i.e.
- no indentation.
-
- By default, all common leading whitespace is removed and then
- the lot is indented by 'self.helpindent'. When calculating the
- common leading whitespace the first line is ignored -- hence
- help content for Conan can be written as follows and have the
- expected indentation:
-
- def do_crush(self, ...):
- '''${cmd_name}: crush your enemies, see them driven before you...
-
- c.f. Conan the Barbarian'''
- """
- if indent is None:
- indent = self.helpindent
- lines = help.splitlines(0)
- _dedentlines(lines, skip_first_line=True)
- lines = [(indent+line).rstrip() for line in lines]
- return '\n'.join(lines)
-
- def _help_preprocess(self, help, cmdname):
- """Hook to preprocess a help string before writing to stdout.
-
- "help" is the help string to process.
- "cmdname" is the canonical sub-command name for which help
- is being given, or None if the help is not specific to a
- command.
-
- By default the following template variables are interpolated in
- help content. (Note: these are similar to Python 2.4's
- string.Template interpolation but not quite.)
-
- ${name}
- The tool's/shell's name, i.e. 'self.name'.
- ${option_list}
- A formatted table of options for this shell/tool.
- ${command_list}
- A formatted table of available sub-commands.
- ${help_list}
- A formatted table of additional help topics (i.e. 'help_*'
- methods with no matching 'do_*' method).
- ${cmd_name}
- The name (and aliases) for this sub-command formatted as:
- "NAME (ALIAS1, ALIAS2, ...)".
- ${cmd_usage}
- A formatted usage block inferred from the command function
- signature.
- ${cmd_option_list}
- A formatted table of options for this sub-command. (This is
- only available for commands using the optparse integration,
- i.e. using @cmdln.option decorators or manually setting the
- 'optparser' attribute on the 'do_*' method.)
-
- Returns the processed help.
- """
- preprocessors = {
- "${name}": self._help_preprocess_name,
- "${option_list}": self._help_preprocess_option_list,
- "${command_list}": self._help_preprocess_command_list,
- "${help_list}": self._help_preprocess_help_list,
- "${cmd_name}": self._help_preprocess_cmd_name,
- "${cmd_usage}": self._help_preprocess_cmd_usage,
- "${cmd_option_list}": self._help_preprocess_cmd_option_list,
- }
-
- for marker, preprocessor in preprocessors.items():
- if marker in help:
- help = preprocessor(help, cmdname)
- return help
-
- def _help_preprocess_name(self, help, cmdname=None):
- return help.replace("${name}", self.name)
-
- def _help_preprocess_option_list(self, help, cmdname=None):
- marker = "${option_list}"
- indent, indent_width = _get_indent(marker, help)
- suffix = _get_trailing_whitespace(marker, help)
-
- if self.optparser:
- # Setup formatting options and format.
- # - Indentation of 4 is better than optparse default of 2.
- # C.f. Damian Conway's discussion of this in Perl Best
- # Practices.
- self.optparser.formatter.indent_increment = 4
- self.optparser.formatter.current_indent = indent_width
- block = self.optparser.format_option_help() + '\n'
- else:
- block = ""
-
- help = help.replace(indent+marker+suffix, block, 1)
- return help
-
-
- def _help_preprocess_command_list(self, help, cmdname=None):
- marker = "${command_list}"
- indent, indent_width = _get_indent(marker, help)
- suffix = _get_trailing_whitespace(marker, help)
-
- # Find any aliases for commands.
- token2canonical = self._get_canonical_map()
- aliases = {}
- for token, cmdname in token2canonical.items():
- if token == cmdname: continue
- aliases.setdefault(cmdname, []).append(token)
-
- # Get the list of (non-hidden) commands and their
- # documentation, if any.
- cmdnames = {} # use a dict to strip duplicates
- for attr in self.get_names():
- if attr.startswith("do_"):
- cmdnames[attr[3:]] = True
- cmdnames = cmdnames.keys()
- cmdnames.sort()
- linedata = []
- for cmdname in cmdnames:
- if aliases.get(cmdname):
- a = aliases[cmdname]
- a.sort()
- cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
- else:
- cmdstr = cmdname
- doc = None
- try:
- helpfunc = getattr(self, 'help_'+cmdname)
- except AttributeError:
- handler = self._get_cmd_handler(cmdname)
- if handler:
- doc = handler.__doc__
- else:
- doc = helpfunc()
-
- # Strip "${cmd_name}: " from the start of a command's doc. Best
- # practice dictates that command help strings begin with this, but
- # it isn't at all wanted for the command list.
- to_strip = "${cmd_name}:"
- if doc and doc.startswith(to_strip):
- #log.debug("stripping %r from start of %s's help string",
- # to_strip, cmdname)
- doc = doc[len(to_strip):].lstrip()
- linedata.append( (cmdstr, doc) )
-
- if linedata:
- subindent = indent + ' '*4
- lines = _format_linedata(linedata, subindent, indent_width+4)
- block = indent + "Commands:\n" \
- + '\n'.join(lines) + "\n\n"
- help = help.replace(indent+marker+suffix, block, 1)
- return help
-
- def _gen_names_and_attrs(self):
- # Inheritance says we have to look in class and
- # base classes; order is not important.
- names = []
- classes = [self.__class__]
- while classes:
- aclass = classes.pop(0)
- if aclass.__bases__:
- classes = classes + list(aclass.__bases__)
- for name in dir(aclass):
- yield (name, getattr(aclass, name))
-
- def _help_preprocess_help_list(self, help, cmdname=None):
- marker = "${help_list}"
- indent, indent_width = _get_indent(marker, help)
- suffix = _get_trailing_whitespace(marker, help)
-
- # Determine the additional help topics, if any.
- helpnames = {}
- token2cmdname = self._get_canonical_map()
- for attrname, attr in self._gen_names_and_attrs():
- if not attrname.startswith("help_"): continue
- helpname = attrname[5:]
- if helpname not in token2cmdname:
- helpnames[helpname] = attr
-
- if helpnames:
- linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
- linedata.sort()
-
- subindent = indent + ' '*4
- lines = _format_linedata(linedata, subindent, indent_width+4)
- block = (indent
- + "Additional help topics (run `%s help TOPIC'):\n" % self.name
- + '\n'.join(lines)
- + "\n\n")
- else:
- block = ''
- help = help.replace(indent+marker+suffix, block, 1)
- return help
-
- def _help_preprocess_cmd_name(self, help, cmdname=None):
- marker = "${cmd_name}"
- handler = self._get_cmd_handler(cmdname)
- if not handler:
- raise CmdlnError("cannot preprocess '%s' into help string: "
- "could not find command handler for %r"
- % (marker, cmdname))
- s = cmdname
- if hasattr(handler, "aliases"):
- s += " (%s)" % (", ".join(handler.aliases))
- help = help.replace(marker, s)
- return help
-
- #TODO: this only makes sense as part of the Cmdln class.
- # Add hooks to add help preprocessing template vars and put
- # this one on that class.
- def _help_preprocess_cmd_usage(self, help, cmdname=None):
- marker = "${cmd_usage}"
- handler = self._get_cmd_handler(cmdname)
- if not handler:
- raise CmdlnError("cannot preprocess '%s' into help string: "
- "could not find command handler for %r"
- % (marker, cmdname))
- indent, indent_width = _get_indent(marker, help)
- suffix = _get_trailing_whitespace(marker, help)
-
- # Extract the introspection bits we need.
- func = handler.im_func
- if func.func_defaults:
- func_defaults = list(func.func_defaults)
- else:
- func_defaults = []
- co_argcount = func.func_code.co_argcount
- co_varnames = func.func_code.co_varnames
- co_flags = func.func_code.co_flags
- CO_FLAGS_ARGS = 4
- CO_FLAGS_KWARGS = 8
-
- # Adjust argcount for possible *args and **kwargs arguments.
- argcount = co_argcount
- if co_flags & CO_FLAGS_ARGS: argcount += 1
- if co_flags & CO_FLAGS_KWARGS: argcount += 1
-
- # Determine the usage string.
- usage = "%s %s" % (self.name, cmdname)
- if argcount <= 2: # handler ::= do_FOO(self, argv)
- usage += " [ARGS...]"
- elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
- argnames = list(co_varnames[3:argcount])
- tail = ""
- if co_flags & CO_FLAGS_KWARGS:
- name = argnames.pop(-1)
- import warnings
- # There is no generally accepted mechanism for passing
- # keyword arguments from the command line. Could
- # *perhaps* consider: arg=value arg2=value2 ...
- warnings.warn("argument '**%s' on '%s.%s' command "
- "handler will never get values"
- % (name, self.__class__.__name__,
- func.func_name))
- if co_flags & CO_FLAGS_ARGS:
- name = argnames.pop(-1)
- tail = "[%s...]" % name.upper()
- while func_defaults:
- func_defaults.pop(-1)
- name = argnames.pop(-1)
- tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
- while argnames:
- name = argnames.pop(-1)
- tail = "%s %s" % (name.upper(), tail)
- usage += ' ' + tail
-
- block_lines = [
- self.helpindent + "Usage:",
- self.helpindent + ' '*4 + usage
- ]
- block = '\n'.join(block_lines) + '\n\n'
-
- help = help.replace(indent+marker+suffix, block, 1)
- return help
-
- #TODO: this only makes sense as part of the Cmdln class.
- # Add hooks to add help preprocessing template vars and put
- # this one on that class.
- def _help_preprocess_cmd_option_list(self, help, cmdname=None):
- marker = "${cmd_option_list}"
- handler = self._get_cmd_handler(cmdname)
- if not handler:
- raise CmdlnError("cannot preprocess '%s' into help string: "
- "could not find command handler for %r"
- % (marker, cmdname))
- indent, indent_width = _get_indent(marker, help)
- suffix = _get_trailing_whitespace(marker, help)
- if hasattr(handler, "optparser"):
- # Setup formatting options and format.
- # - Indentation of 4 is better than optparse default of 2.
- # C.f. Damian Conway's discussion of this in Perl Best
- # Practices.
- handler.optparser.formatter.indent_increment = 4
- handler.optparser.formatter.current_indent = indent_width
- block = handler.optparser.format_option_help() + '\n'
- else:
- block = ""
-
- help = help.replace(indent+marker+suffix, block, 1)
- return help
-
- def _get_canonical_cmd_name(self, token):
- map = self._get_canonical_map()
- return map.get(token, None)
-
- def _get_canonical_map(self):
- """Return a mapping of available command names and aliases to
- their canonical command name.
- """
- cacheattr = "_token2canonical"
- if not hasattr(self, cacheattr):
- # Get the list of commands and their aliases, if any.
- token2canonical = {}
- cmd2funcname = {} # use a dict to strip duplicates
- for attr in self.get_names():
- if attr.startswith("do_"): cmdname = attr[3:]
- elif attr.startswith("_do_"): cmdname = attr[4:]
- else:
- continue
- cmd2funcname[cmdname] = attr
- token2canonical[cmdname] = cmdname
- for cmdname, funcname in cmd2funcname.items(): # add aliases
- func = getattr(self, funcname)
- aliases = getattr(func, "aliases", [])
- for alias in aliases:
- if alias in cmd2funcname:
- import warnings
- warnings.warn("'%s' alias for '%s' command conflicts "
- "with '%s' handler"
- % (alias, cmdname, cmd2funcname[alias]))
- continue
- token2canonical[alias] = cmdname
- setattr(self, cacheattr, token2canonical)
- return getattr(self, cacheattr)
-
- def _get_cmd_handler(self, cmdname):
- handler = None
- try:
- handler = getattr(self, 'do_' + cmdname)
- except AttributeError:
- try:
- # Private command handlers begin with "_do_".
- handler = getattr(self, '_do_' + cmdname)
- except AttributeError:
- pass
- return handler
-
- def _do_EOF(self, argv):
- # Default EOF handler
- # Note: an actual EOF is redirected to this command.
- #TODO: separate name for this. Currently it is available from
- # command-line. Is that okay?
- self.stdout.write('\n')
- self.stdout.flush()
- self.stop = True
-
- def emptyline(self):
- # Different from cmd.Cmd: don't repeat the last command for an
- # emptyline.
- if self.cmdlooping:
- pass
- else:
- return self.do_help(["help"])
-
-
-#---- optparse.py extension to fix (IMO) some deficiencies
-#
-# See the class _OptionParserEx docstring for details.
-#
-
-class StopOptionProcessing(Exception):
- """Indicate that option *and argument* processing should stop
- cleanly. This is not an error condition. It is similar in spirit to
- StopIteration. This is raised by _OptionParserEx's default "help"
- and "version" option actions and can be raised by custom option
- callbacks too.
-
- Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
- usage is:
-
- parser = CmdlnOptionParser(mycmd)
- parser.add_option("-f", "--force", dest="force")
- ...
- try:
- opts, args = parser.parse_args()
- except StopOptionProcessing:
- # normal termination, "--help" was probably given
- sys.exit(0)
- """
-
-class _OptionParserEx(optparse.OptionParser):
- """An optparse.OptionParser that uses exceptions instead of sys.exit.
-
- This class is an extension of optparse.OptionParser that differs
- as follows:
- - Correct (IMO) the default OptionParser error handling to never
- sys.exit(). Instead OptParseError exceptions are passed through.
- - Add the StopOptionProcessing exception (a la StopIteration) to
- indicate normal termination of option processing.
- See StopOptionProcessing's docstring for details.
-
- I'd also like to see the following in the core optparse.py, perhaps
- as a RawOptionParser which would serve as a base class for the more
- generally used OptionParser (that works as current):
- - Remove the implicit addition of the -h|--help and --version
- options. They can get in the way (e.g. if want '-?' and '-V' for
- these as well) and it is not hard to do:
- optparser.add_option("-h", "--help", action="help")
- optparser.add_option("--version", action="version")
- These are good practices, just not valid defaults if they can
- get in the way.
- """
- def error(self, msg):
- raise optparse.OptParseError(msg)
-
- def exit(self, status=0, msg=None):
- if status == 0:
- raise StopOptionProcessing(msg)
- else:
- #TODO: don't lose status info here
- raise optparse.OptParseError(msg)
-
-
-
-#---- optparse.py-based option processing support
-
-class CmdlnOptionParser(_OptionParserEx):
- """An optparse.OptionParser class more appropriate for top-level
- Cmdln options. For parsing of sub-command options, see
- SubCmdOptionParser.
-
- Changes:
- - disable_interspersed_args() by default, because a Cmdln instance
- has sub-commands which may themselves have options.
- - Redirect print_help() to the Cmdln.do_help() which is better
- equipped to handle the "help" action.
- - error() will raise a CmdlnUserError: OptionParse.error() is meant
- to be called for user errors. Raising a well-known error here can
- make error handling clearer.
- - Also see the changes in _OptionParserEx.
- """
- def __init__(self, cmdln, **kwargs):
- self.cmdln = cmdln
- kwargs["prog"] = self.cmdln.name
- _OptionParserEx.__init__(self, **kwargs)
- self.disable_interspersed_args()
-
- def print_help(self, file=None):
- self.cmdln.onecmd(["help"])
-
- def error(self, msg):
- raise CmdlnUserError(msg)
-
-
-class SubCmdOptionParser(_OptionParserEx):
- def set_cmdln_info(self, cmdln, subcmd):
- """Called by Cmdln to pass relevant info about itself needed
- for print_help().
- """
- self.cmdln = cmdln
- self.subcmd = subcmd
-
- def print_help(self, file=None):
- self.cmdln.onecmd(["help", self.subcmd])
-
- def error(self, msg):
- raise CmdlnUserError(msg)
-
-
-def option(*args, **kwargs):
- """Decorator to add an option to the optparser argument of a Cmdln
- subcommand.
-
- Example:
- class MyShell(cmdln.Cmdln):
- @cmdln.option("-f", "--force", help="force removal")
- def do_remove(self, subcmd, opts, *args):
- #...
- """
- #XXX Is there a possible optimization for many options to not have a
- # large stack depth here?
- def decorate(f):
- if not hasattr(f, "optparser"):
- f.optparser = SubCmdOptionParser()
- f.optparser.add_option(*args, **kwargs)
- return f
- return decorate
-
-
-class Cmdln(RawCmdln):
- """An improved (on cmd.Cmd) framework for building multi-subcommand
- scripts (think "svn" & "cvs") and simple shells (think "pdb" and
- "gdb").
-
- A simple example:
-
- import cmdln
-
- class MySVN(cmdln.Cmdln):
- name = "svn"
-
- @cmdln.aliases('stat', 'st')
- @cmdln.option('-v', '--verbose', action='store_true'
- help='print verbose information')
- def do_status(self, subcmd, opts, *paths):
- print "handle 'svn status' command"
-
- #...
-
- if __name__ == "__main__":
- shell = MySVN()
- retval = shell.main()
- sys.exit(retval)
-
- 'Cmdln' extends 'RawCmdln' by providing optparse option processing
- integration. See this class' _dispatch_cmd() docstring and
- <http://trentm.com/projects/cmdln> for more information.
- """
- def _dispatch_cmd(self, handler, argv):
- """Introspect sub-command handler signature to determine how to
- dispatch the command. The raw handler provided by the base
- 'RawCmdln' class is still supported:
-
- def do_foo(self, argv):
- # 'argv' is the vector of command line args, argv[0] is
- # the command name itself (i.e. "foo" or an alias)
- pass
-
- In addition, if the handler has more than 2 arguments option
- processing is automatically done (using optparse):
-
- @cmdln.option('-v', '--verbose', action='store_true')
- def do_bar(self, subcmd, opts, *args):
- # subcmd = <"bar" or an alias>
- # opts = <an optparse.Values instance>
- if opts.verbose:
- print "lots of debugging output..."
- # args = <tuple of arguments>
- for arg in args:
- bar(arg)
-
- TODO: explain that "*args" can be other signatures as well.
-
- The `cmdln.option` decorator corresponds to an `add_option()`
- method call on an `optparse.OptionParser` instance.
-
- You can declare a specific number of arguments:
-
- @cmdln.option('-v', '--verbose', action='store_true')
- def do_bar2(self, subcmd, opts, bar_one, bar_two):
- #...
-
- and an appropriate error message will be raised/printed if the
- command is called with a different number of args.
- """
- co_argcount = handler.im_func.func_code.co_argcount
- if co_argcount == 2: # handler ::= do_foo(self, argv)
- return handler(argv)
- elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
- try:
- optparser = handler.optparser
- except AttributeError:
- optparser = handler.im_func.optparser = SubCmdOptionParser()
- assert isinstance(optparser, SubCmdOptionParser)
- optparser.set_cmdln_info(self, argv[0])
- try:
- opts, args = optparser.parse_args(argv[1:])
- except StopOptionProcessing:
- #TODO: this doesn't really fly for a replacement of
- # optparse.py behaviour, does it?
- return 0 # Normal command termination
-
- try:
- return handler(argv[0], opts, *args)
- except TypeError, ex:
- # Some TypeError's are user errors:
- # do_foo() takes at least 4 arguments (3 given)
- # do_foo() takes at most 5 arguments (6 given)
- # do_foo() takes exactly 5 arguments (6 given)
- # Raise CmdlnUserError for these with a suitably
- # massaged error message.
- import sys
- tb = sys.exc_info()[2] # the traceback object
- if tb.tb_next is not None:
-                # If the traceback is more than one level deep, then the
-                # TypeError did *not* happen on the "handler(...)" call
-                # above. In that case we don't want to handle it specially
-                # here: it would falsely mask deeper code errors.
- raise
- msg = ex.args[0]
- match = _INCORRECT_NUM_ARGS_RE.search(msg)
- if match:
- msg = list(match.groups())
- msg[1] = int(msg[1]) - 3
- if msg[1] == 1:
- msg[2] = msg[2].replace("arguments", "argument")
- msg[3] = int(msg[3]) - 3
- msg = ''.join(map(str, msg))
- raise CmdlnUserError(msg)
- else:
- raise
- else:
- raise CmdlnError("incorrect argcount for %s(): takes %d, must "
- "take 2 for 'argv' signature or 3+ for 'opts' "
- "signature" % (handler.__name__, co_argcount))
-
-
-
-#---- internal support functions
-
-def _format_linedata(linedata, indent, indent_width):
- """Format specific linedata into a pleasant layout.
-
- "linedata" is a list of 2-tuples of the form:
- (<item-display-string>, <item-docstring>)
- "indent" is a string to use for one level of indentation
- "indent_width" is a number of columns by which the
- formatted data will be indented when printed.
-
- The <item-display-string> column is held to 15 columns.
- """
- lines = []
- WIDTH = 78 - indent_width
- SPACING = 2
- NAME_WIDTH_LOWER_BOUND = 13
- NAME_WIDTH_UPPER_BOUND = 16
- NAME_WIDTH = max([len(s) for s,d in linedata])
- if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
- NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
- else:
- NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
-
- DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
- for namestr, doc in linedata:
- line = indent + namestr
- if len(namestr) <= NAME_WIDTH:
- line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
- else:
- lines.append(line)
- line = indent + ' ' * (NAME_WIDTH + SPACING)
- line += _summarize_doc(doc, DOC_WIDTH)
- lines.append(line.rstrip())
- return lines
-
-def _summarize_doc(doc, length=60):
- r"""Parse out a short one line summary from the given doclines.
-
- "doc" is the doc string to summarize.
- "length" is the max length for the summary
-
- >>> _summarize_doc("this function does this")
- 'this function does this'
- >>> _summarize_doc("this function does this", 10)
- 'this fu...'
- >>> _summarize_doc("this function does this\nand that")
- 'this function does this and that'
- >>> _summarize_doc("this function does this\n\nand that")
- 'this function does this'
- """
- import re
- if doc is None:
- return ""
- assert length > 3, "length <= 3 is absurdly short for a doc summary"
- doclines = doc.strip().splitlines(0)
- if not doclines:
- return ""
-
- summlines = []
- for i, line in enumerate(doclines):
- stripped = line.strip()
- if not stripped:
- break
- summlines.append(stripped)
- if len(''.join(summlines)) >= length:
- break
-
- summary = ' '.join(summlines)
- if len(summary) > length:
- summary = summary[:length-3] + "..."
- return summary
-
-
-def line2argv(line):
- r"""Parse the given line into an argument vector.
-
- "line" is the line of input to parse.
-
- This may get niggly when dealing with quoting and escaping. The
- current state of this parsing may not be completely thorough/correct
- in this respect.
-
- >>> from cmdln import line2argv
- >>> line2argv("foo")
- ['foo']
- >>> line2argv("foo bar")
- ['foo', 'bar']
- >>> line2argv("foo bar ")
- ['foo', 'bar']
- >>> line2argv(" foo bar")
- ['foo', 'bar']
-
- Quote handling:
-
- >>> line2argv("'foo bar'")
- ['foo bar']
- >>> line2argv('"foo bar"')
- ['foo bar']
- >>> line2argv(r'"foo\"bar"')
- ['foo"bar']
- >>> line2argv("'foo bar' spam")
- ['foo bar', 'spam']
- >>> line2argv("'foo 'bar spam")
- ['foo bar', 'spam']
-
- >>> line2argv('some\tsimple\ttests')
- ['some', 'simple', 'tests']
- >>> line2argv('a "more complex" test')
- ['a', 'more complex', 'test']
- >>> line2argv('a more="complex test of " quotes')
- ['a', 'more=complex test of ', 'quotes']
- >>> line2argv('a more" complex test of " quotes')
- ['a', 'more complex test of ', 'quotes']
- >>> line2argv('an "embedded \\"quote\\""')
- ['an', 'embedded "quote"']
-
- # Komodo bug 48027
- >>> line2argv('foo bar C:\\')
- ['foo', 'bar', 'C:\\']
-
- # Komodo change 127581
- >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
- ['\\test\\slash', 'foo bar', 'foo"bar']
-
- # Komodo change 127629
- >>> if sys.platform == "win32":
- ... line2argv(r'\foo\bar') == ['\\foo\\bar']
- ... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
- ... line2argv('"foo') == ['foo']
- ... else:
- ... line2argv(r'\foo\bar') == ['foobar']
- ... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
- ... try:
- ... line2argv('"foo')
- ... except ValueError, ex:
- ... "not terminated" in str(ex)
- True
- True
- True
- """
- import string
- line = line.strip()
- argv = []
- state = "default"
- arg = None # the current argument being parsed
- i = -1
- while 1:
- i += 1
- if i >= len(line): break
- ch = line[i]
-
- if ch == "\\" and i+1 < len(line):
- # escaped char always added to arg, regardless of state
- if arg is None: arg = ""
- if (sys.platform == "win32"
- or state in ("double-quoted", "single-quoted")
- ) and line[i+1] not in tuple('"\''):
- arg += ch
- i += 1
- arg += line[i]
- continue
-
- if state == "single-quoted":
- if ch == "'":
- state = "default"
- else:
- arg += ch
- elif state == "double-quoted":
- if ch == '"':
- state = "default"
- else:
- arg += ch
- elif state == "default":
- if ch == '"':
- if arg is None: arg = ""
- state = "double-quoted"
- elif ch == "'":
- if arg is None: arg = ""
- state = "single-quoted"
- elif ch in string.whitespace:
- if arg is not None:
- argv.append(arg)
- arg = None
- else:
- if arg is None: arg = ""
- arg += ch
- if arg is not None:
- argv.append(arg)
- if not sys.platform == "win32" and state != "default":
- raise ValueError("command line is not terminated: unfinished %s "
- "segment" % state)
- return argv
-
-
-def argv2line(argv):
- r"""Put together the given argument vector into a command line.
-
- "argv" is the argument vector to process.
-
- >>> from cmdln import argv2line
- >>> argv2line(['foo'])
- 'foo'
- >>> argv2line(['foo', 'bar'])
- 'foo bar'
- >>> argv2line(['foo', 'bar baz'])
- 'foo "bar baz"'
- >>> argv2line(['foo"bar'])
- 'foo"bar'
- >>> print argv2line(['foo" bar'])
- 'foo" bar'
- >>> print argv2line(["foo' bar"])
- "foo' bar"
- >>> argv2line(["foo'bar"])
- "foo'bar"
- """
- escapedArgs = []
- for arg in argv:
- if ' ' in arg and '"' not in arg:
- arg = '"'+arg+'"'
- elif ' ' in arg and "'" not in arg:
- arg = "'"+arg+"'"
- elif ' ' in arg:
- arg = arg.replace('"', r'\"')
- arg = '"'+arg+'"'
- escapedArgs.append(arg)
- return ' '.join(escapedArgs)
-
-
-# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
- """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-
- "lines" is a list of lines to dedent.
- "tabsize" is the tab width to use for indent width calculations.
- "skip_first_line" is a boolean indicating if the first line should
- be skipped for calculating the indent width and for dedenting.
- This is sometimes useful for docstrings and similar.
-
- Same as dedent() except operates on a sequence of lines. Note: the
- lines list is modified **in-place**.
- """
- DEBUG = False
- if DEBUG:
- print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
- % (tabsize, skip_first_line)
- indents = []
- margin = None
- for i, line in enumerate(lines):
- if i == 0 and skip_first_line: continue
- indent = 0
- for ch in line:
- if ch == ' ':
- indent += 1
- elif ch == '\t':
- indent += tabsize - (indent % tabsize)
- elif ch in '\r\n':
- continue # skip all-whitespace lines
- else:
- break
- else:
- continue # skip all-whitespace lines
- if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
- if margin is None:
- margin = indent
- else:
- margin = min(margin, indent)
- if DEBUG: print "dedent: margin=%r" % margin
-
- if margin is not None and margin > 0:
- for i, line in enumerate(lines):
- if i == 0 and skip_first_line: continue
- removed = 0
- for j, ch in enumerate(line):
- if ch == ' ':
- removed += 1
- elif ch == '\t':
- removed += tabsize - (removed % tabsize)
- elif ch in '\r\n':
- if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
- lines[i] = lines[i][j:]
- break
- else:
- raise ValueError("unexpected non-whitespace char %r in "
- "line %r while removing %d-space margin"
- % (ch, line, margin))
- if DEBUG:
- print "dedent: %r: %r -> removed %d/%d"\
- % (line, ch, removed, margin)
- if removed == margin:
- lines[i] = lines[i][j+1:]
- break
- elif removed > margin:
- lines[i] = ' '*(removed-margin) + lines[i][j+1:]
- break
- return lines
-
-def _dedent(text, tabsize=8, skip_first_line=False):
- """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
- "text" is the text to dedent.
- "tabsize" is the tab width to use for indent width calculations.
- "skip_first_line" is a boolean indicating if the first line should
- be skipped for calculating the indent width and for dedenting.
- This is sometimes useful for docstrings and similar.
-
- textwrap.dedent(s), but don't expand tabs to spaces
- """
- lines = text.splitlines(1)
- _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
- return ''.join(lines)
-
-
-def _get_indent(marker, s, tab_width=8):
- """_get_indent(marker, s, tab_width=8) ->
- (<indentation-of-'marker'>, <indentation-width>)"""
- # Figure out how much the marker is indented.
- INDENT_CHARS = tuple(' \t')
- start = s.index(marker)
- i = start
- while i > 0:
- if s[i-1] not in INDENT_CHARS:
- break
- i -= 1
- indent = s[i:start]
- indent_width = 0
- for ch in indent:
- if ch == ' ':
- indent_width += 1
- elif ch == '\t':
- indent_width += tab_width - (indent_width % tab_width)
- return indent, indent_width
-
-def _get_trailing_whitespace(marker, s):
- """Return the whitespace content trailing the given 'marker' in string 's',
- up to and including a newline.
- """
- suffix = ''
- start = s.index(marker) + len(marker)
- i = start
- while i < len(s):
- if s[i] in ' \t':
- suffix += s[i]
- elif s[i] in '\r\n':
- suffix += s[i]
- if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
- suffix += s[i+1]
- break
- else:
- break
- i += 1
- return suffix
-
-
-
-#---- bash completion support
-# Note: This is still experimental. I expect to change this
-# significantly.
-#
-# To get Bash completion for a cmdln.Cmdln class, run the following
-# bash command:
-# $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
-# For example:
-# $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
-#
-#TODO: Simplify the above so don't have to given path to script (try to
-# find it on PATH, if possible). Could also make class name
-# optional if there is only one in the module (common case).
-
-if __name__ == "__main__" and len(sys.argv) == 6:
- def _log(s):
- return # no-op, comment out for debugging
- from os.path import expanduser
- fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
- fout.write(str(s) + '\n')
- fout.close()
-
- # Recipe: module_from_path (1.0.1+)
- def _module_from_path(path):
- import imp, os, sys
- path = os.path.expanduser(path)
- dir = os.path.dirname(path) or os.curdir
- name = os.path.splitext(os.path.basename(path))[0]
- sys.path.insert(0, dir)
- try:
- iinfo = imp.find_module(name, [dir])
- return imp.load_module(name, *iinfo)
- finally:
- sys.path.remove(dir)
-
- def _get_bash_cplns(script_path, class_name, cmd_name,
- token, preceding_token):
- _log('--')
- _log('get_cplns(%r, %r, %r, %r, %r)'
- % (script_path, class_name, cmd_name, token, preceding_token))
- comp_line = os.environ["COMP_LINE"]
- comp_point = int(os.environ["COMP_POINT"])
- _log("COMP_LINE: %r" % comp_line)
- _log("COMP_POINT: %r" % comp_point)
-
- try:
- script = _module_from_path(script_path)
- except ImportError, ex:
- _log("error importing `%s': %s" % (script_path, ex))
- return []
- shell = getattr(script, class_name)()
- cmd_map = shell._get_canonical_map()
- del cmd_map["EOF"]
-
- # Determine if completing the sub-command name.
- parts = comp_line[:comp_point].split(None, 1)
- _log(parts)
- if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
- #TODO: if parts[1].startswith('-'): handle top-level opts
- _log("complete sub-command names")
- matches = {}
- for name, canon_name in cmd_map.items():
- if name.startswith(token):
- matches[name] = canon_name
- if not matches:
- return []
- elif len(matches) == 1:
- return matches.keys()
- elif len(set(matches.values())) == 1:
- return [matches.values()[0]]
- else:
- return matches.keys()
-
- # Otherwise, complete options for the given sub-command.
- #TODO: refine this so it does the right thing with option args
- if token.startswith('-'):
- cmd_name = comp_line.split(None, 2)[1]
- try:
- cmd_canon_name = cmd_map[cmd_name]
- except KeyError:
- return []
- handler = shell._get_cmd_handler(cmd_canon_name)
- optparser = getattr(handler, "optparser", None)
- if optparser is None:
- optparser = SubCmdOptionParser()
- opt_strs = []
- for option in optparser.option_list:
- for opt_str in option._short_opts + option._long_opts:
- if opt_str.startswith(token):
- opt_strs.append(opt_str)
- return opt_strs
-
- return []
-
- for cpln in _get_bash_cplns(*sys.argv[1:]):
- print cpln
-
diff -r 7ffc6f78a4703d624debbbe8594b3c91bc1eda2a -r 6a5826b1386431aa99452f9fb5edd666cf4ed655 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -88,7 +88,7 @@
args.pf = pf
self(args)
else:
- args.pf = args.pf[0]
+ args.pf = getattr(args, 'pf', [None])[0]
self(args)
class GetParameterFiles(argparse.Action):
@@ -307,7 +307,7 @@
update_supp = True
vstring = None
if "site-packages" not in path:
- vstring = _get_hg_version(path)
+ vstring = get_hg_version(path)
print
print "The current version of the code is:"
print
@@ -316,7 +316,7 @@
print "---"
print
print "This installation CAN be automatically updated."
- _update_hg(path)
+ update_hg(path)
print "Updated successfully."
else:
print
@@ -360,7 +360,7 @@
update_supp = True
vstring = None
if "site-packages" not in path:
- vstring = _get_hg_version(path)
+ vstring = get_hg_version(path)
print
print "The current version of the code is:"
print
@@ -370,7 +370,7 @@
print
print "This installation CAN be automatically updated."
if opts.update_source:
- _update_hg(path)
+ update_hg(path)
print "Updated successfully."
elif opts.update_source:
print
@@ -740,7 +740,7 @@
print "projections')"
print
try:
- current_version = _get_yt_version()
+ current_version = get_yt_version()
except:
current_version = "Unavailable"
summary = raw_input("Summary? ")
@@ -801,7 +801,7 @@
print "If you don't have one, run the 'yt bootstrap_dev' command."
print
loki = raw_input()
- retval = _bb_apicall(endpoint, data, use_pass=True)
+ retval = bb_apicall(endpoint, data, use_pass=True)
import json
retval = json.loads(retval)
url = "http://hg.yt-project.org/yt/issue/%s" % retval['local_id']
https://bitbucket.org/yt_analysis/yt/changeset/aa16133a99dd/
changeset: aa16133a99dd
branch: yt
user: MatthewTurk
date: 2011-11-23 22:26:24
summary: Removing print statement
affected #: 1 file
diff -r 6a5826b1386431aa99452f9fb5edd666cf4ed655 -r aa16133a99dd03091b41e39d22bc83cc54151cc3 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -66,7 +66,6 @@
def __init__(cls, name, b, d):
type.__init__(cls, name, b, d)
if cls.name is not None:
- print "Adding", cls.name
sc = subparsers.add_parser(cls.name,
description = cls.description)
sc.set_defaults(func=cls.run)
https://bitbucket.org/yt_analysis/yt/changeset/349d298ebc59/
changeset: 349d298ebc59
branch: yt
user: MatthewTurk
date: 2011-11-25 06:11:05
summary: Adding checks for dimensionality < 3 in FLASH files.
affected #: 2 files
diff -r aa16133a99dd03091b41e39d22bc83cc54151cc3 -r 349d298ebc597567f513991c7b6d270be473ee1d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1093,6 +1093,7 @@
dv = dv[sl]
mask = self.__cut_mask_child_mask(grid)[sl]
dataVals = dv.ravel()[mask.ravel() == 1]
+ print
return dataVals
def _gen_node_name(self):
diff -r aa16133a99dd03091b41e39d22bc83cc54151cc3 -r 349d298ebc597567f513991c7b6d270be473ee1d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -136,14 +136,25 @@
offset = 7
ii = na.argsort(self.grid_levels.flat)
gid = self._handle["/gid"][:]
+ first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
for g in self.grids[ii].flat:
gi = g.id - g._id_offset
# FLASH uses 1-indexed group info
- g.Children = [self.grids[i - 1] for i in gid[gi,7:] if i > -1]
+ g.Children = [self.grids[i - 1] for i in gid[gi,first_ind:] if i > -1]
for g1 in g.Children:
g1.Parent = g
g._prepare_grid()
g._setup_dx()
+ if self.parameter_file.dimensionality < 3:
+ DD = (self.parameter_file.domain_right_edge[2] -
+ self.parameter_file.domain_left_edge[2])
+ for g in self.grids:
+ g.dds[2] = DD
+ if self.parameter_file.dimensionality < 2:
+ DD = (self.parameter_file.domain_right_edge[1] -
+ self.parameter_file.domain_left_edge[1])
+ for g in self.grids:
+ g.dds[1] = DD
self.max_level = self.grid_levels.max()
def _setup_derived_fields(self):
@@ -183,7 +194,6 @@
# These should be explicitly obtained from the file, but for now that
# will wait until a reorganization of the source tree and better
# generalization.
- self.dimensionality = 3
self.refine_by = 2
self.parameters["HydroMethod"] = 'flash' # always PPM DE
self.parameters["Time"] = 1. # default unit is 1...
@@ -275,6 +285,8 @@
[self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
self.domain_right_edge = na.array(
[self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
+ self.dimensionality = self._find_parameter("integer", "dimensionality",
+ scalar = True)
# Determine domain dimensions
try:
https://bitbucket.org/yt_analysis/yt/changeset/9fdd8bf18ee0/
changeset: 9fdd8bf18ee0
branch: yt
user: MatthewTurk
date: 2011-11-23 23:12:24
summary: Adding missing import
affected #: 1 file
diff -r aa16133a99dd03091b41e39d22bc83cc54151cc3 -r 9fdd8bf18ee00a7d76f33c0d1c2c7c2052eb7601 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,7 +24,7 @@
"""
import time, types, signal, inspect, traceback, sys, pdb, os
-import warnings, struct
+import warnings, struct, subprocess
from math import floor, ceil
from yt.utilities.exceptions import *
https://bitbucket.org/yt_analysis/yt/changeset/28a59d6b883c/
changeset: 28a59d6b883c
branch: yt
user: MatthewTurk
date: 2011-11-28 13:31:43
summary: Move around parallelism check and move --parallel into the parser
affected #: 1 file
diff -r 9fdd8bf18ee00a7d76f33c0d1c2c7c2052eb7601 -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -33,11 +33,7 @@
exe_name = os.path.basename(sys.executable)
# At import time, we determined whether or not we're being run in parallel.
-if exe_name in \
- ["mpi4py", "embed_enzo",
- "python"+sys.version[:3]+"-mpi"] \
- or "--parallel" in sys.argv or '_parallel' in dir(sys) \
- or any(["ipengine" in arg for arg in sys.argv]):
+def turn_on_parallelism():
try:
from mpi4py import MPI
parallel_capable = (MPI.COMM_WORLD.size > 1)
@@ -59,8 +55,7 @@
if ytcfg.getboolean("yt","LogFile"):
ytcfg["yt","LogFile"] = "False"
yt.utilities.logger.disable_file_logging()
-else:
- parallel_capable = False
+ return parallel_capable
# This fallback is for Paraview:
@@ -113,8 +108,22 @@
help = "Display detailed traceback.", nargs = 0)
parser.add_argument("--rpdb", action=SetExceptionHandling,
help = "Enable remote pdb interaction (for parallel debugging).", nargs = 0)
+parser.add_argument("--parallel", action="store_true", default=False,
+ dest = "parallel",
+ help = "Run in MPI-parallel mode (must be launched as an MPI task)")
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
if not ytcfg.getboolean("yt","__command_line"):
- parser.parse_args()
+ # Alternate:
+ # opts, args = parser.parse_known_args()
+ opts = parser.parse_args()
+
+if exe_name in \
+ ["mpi4py", "embed_enzo",
+ "python"+sys.version[:3]+"-mpi"] \
+ or opts.parallel or '_parallel' in dir(sys) \
+ or any(["ipengine" in arg for arg in sys.argv]):
+ parallel_capable = turn_on_parallelism()
+else:
+ parallel_capable = False
https://bitbucket.org/yt_analysis/yt/changeset/bbf110ae0e32/
changeset: bbf110ae0e32
branch: yt
user: MatthewTurk
date: 2011-12-02 17:12:00
summary: Merge
affected #: 8 files
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -78,11 +78,11 @@
and ensures that after the function is called, the field_parameters will
be returned to normal.
"""
- def save_state(self, grid, field=None):
+ def save_state(self, grid, field=None, *args, **kwargs):
old_params = grid.field_parameters
old_keys = grid.field_data.keys()
grid.field_parameters = self.field_parameters
- tr = func(self, grid, field)
+ tr = func(self, grid, field, *args, **kwargs)
grid.field_parameters = old_params
grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
return tr
@@ -322,8 +322,28 @@
pass
del self.field_data[key]
- def _generate_field_in_grids(self, fieldName):
- pass
+ def _generate_field(self, field):
+ if self.pf.field_info.has_key(field):
+ # First we check the validator
+ try:
+ self.pf.field_info[field].check_available(self)
+ except NeedsGridType, ngt_exception:
+ # We leave this to be implementation-specific
+ self._generate_field_in_grids(field, ngt_exception.ghost_zones)
+ return False
+ else:
+ self[field] = self.pf.field_info[field](self)
+ return True
+ else: # Can't find the field, try as it might
+ raise KeyError(field)
+
+ def _generate_field_in_grids(self, field, num_ghost_zones=0):
+ for grid in self._grids:
+ grid[field] = self.__touch_grid_field(grid, field)
+
+ @restore_grid_state
+ def __touch_grid_field(self, grid, field):
+ return grid[field]
_key_fields = None
def write_out(self, filename, fields=None, format="%0.16e"):
@@ -454,25 +474,6 @@
self._sortkey = None
self._sorted = {}
- def _generate_field_in_grids(self, field, num_ghost_zones=0):
- for grid in self._grids:
- temp = grid[field]
-
- def _generate_field(self, field):
- if self.pf.field_info.has_key(field):
- # First we check the validator
- try:
- self.pf.field_info[field].check_available(self)
- except NeedsGridType, ngt_exception:
- # We leave this to be implementation-specific
- self._generate_field_in_grids(field, ngt_exception.ghost_zones)
- return False
- else:
- self[field] = self.pf.field_info[field](self)
- return True
- else: # Can't find the field, try as it might
- raise KeyError(field)
-
def get_data(self, fields=None, in_grids=False):
if self._grids == None:
self._get_list_of_grids()
@@ -563,6 +564,7 @@
& (self.py < self.pf.hierarchy.grid_right_edge[:,self.py_ax]))
self._grids = self.hierarchy.grids[y]
+ @restore_grid_state
def _get_data_from_grid(self, grid, field):
# We are orthogonal, so we can feel free to make assumptions
# for the sake of speed.
@@ -657,6 +659,7 @@
t = t.reshape((t.shape[0],1))
return self.start_point + t*self.vec
+ @restore_grid_state
def _get_data_from_grid(self, grid, field):
mask = na.logical_and(self._get_cut_mask(grid),
grid.child_mask)
@@ -739,6 +742,7 @@
p = na.all((min_streampoint <= RE) & (max_streampoint > LE), axis=1)
self._grids = self.hierarchy.grids[p]
+ @restore_grid_state
def _get_data_from_grid(self, grid, field):
mask = na.logical_and(self._get_cut_mask(grid),
grid.child_mask)
@@ -828,25 +832,6 @@
for field in temp_data.keys():
self[field] = temp_data[field]
- def _generate_field(self, field):
- if self.pf.field_info.has_key(field):
- # First we check the validator
- try:
- self.pf.field_info[field].check_available(self)
- except NeedsGridType, ngt_exception:
- # We leave this to be implementation-specific
- self._generate_field_in_grids(field, ngt_exception.ghost_zones)
- return False
- else:
- self[field] = self.pf.field_info[field](self)
- return True
- else: # Can't find the field, try as it might
- raise KeyError(field)
-
- def _generate_field_in_grids(self, field, num_ghost_zones=0):
- for grid in self._grids:
- temp = grid[field]
-
def to_frb(self, width, resolution, center = None):
if center is None:
center = self.get_field_parameter("center")
@@ -1623,9 +1608,9 @@
# _project_level, then it would be more memory conservative
if self.preload_style == 'all':
dependencies = self.get_dependencies(fields, ghost_zones = False)
- print "Preloading %s grids and getting %s" % (
- len(self.source._get_grid_objs()),
- dependencies)
+ mylog.debug("Preloading %s grids and getting %s",
+ len(self.source._get_grid_objs()),
+ dependencies)
self.comm.preload([g for g in self._get_grid_objs()],
dependencies, self.hierarchy.io)
# By changing the remove-from-tree method to accumulate, we can avoid
@@ -2256,6 +2241,7 @@
dls[level].append(float(just_one(grid['d%s' % axis_names[self.axis]])))
return dls
+ @restore_grid_state
def _get_data_from_grid(self, grid, fields, dls):
g_fields = [grid[field].astype("float64") for field in fields]
c_fields = [self[field] for field in fields]
@@ -2393,29 +2379,6 @@
grid[field] = new_field
i += np
- def _generate_field(self, field):
- if self.pf.field_info.has_key(field):
- # First we check the validator
- try:
- self.pf.field_info[field].check_available(self)
- except NeedsGridType, ngt_exception:
- # We leave this to be implementation-specific
- self._generate_field_in_grids(field, ngt_exception.ghost_zones)
- return False
- else:
- self[field] = self.pf.field_info[field](self)
- return True
- else: # Can't find the field, try as it might
- raise KeyError(field)
-
- def _generate_field_in_grids(self, field, num_ghost_zones=0):
- for grid in self._grids:
- self.__touch_grid_field(grid, field)
-
- @restore_grid_state
- def __touch_grid_field(self, grid, field):
- grid[field]
-
def _is_fully_enclosed(self, grid):
return na.all(self._get_cut_mask)
@@ -3504,6 +3467,7 @@
output_field, output_left)
self.field_data[field] = output_field
+ @restore_grid_state
def _get_data_from_grid(self, grid, fields):
fields = ensure_list(fields)
g_fields = [grid[field].astype("float64") for field in fields]
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -91,11 +91,10 @@
# Because we need an instantiated class to check the pf's existence in
# the cache, we move that check to here from __new__. This avoids
# double-instantiation.
- if ytcfg.getboolean('yt', 'serialize'):
- try:
- _pf_store.check_pf(self)
- except NoParameterShelf:
- pass
+ try:
+ _pf_store.check_pf(self)
+ except NoParameterShelf:
+ pass
self.print_key_parameters()
self.create_field_info()
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -575,24 +575,24 @@
reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
# Initial setup:
mylog.debug("Reconstructing parent-child relationships")
- self.grids = []
+ grids = []
# We enumerate, so it's 0-indexed id and 1-indexed pid
self.filenames = ["-1"] * self.num_grids
for id,pid in enumerate(reverse_tree):
- self.grids.append(self.grid(id+1, self))
- self.grids[-1].Level = self.grid_levels[id, 0]
+ grids.append(self.grid(id+1, self))
+ grids[-1].Level = self.grid_levels[id, 0]
if pid > 0:
- self.grids[-1]._parent_id = pid
- self.grids[pid-1]._children_ids.append(self.grids[-1].id)
+ grids[-1]._parent_id = pid
+ grids[pid-1]._children_ids.append(grids[-1].id)
self.max_level = self.grid_levels.max()
mylog.debug("Preparing grids")
self.grids = na.empty(len(grids), dtype='object')
- for i, grid in enumerate(self.grids):
+ for i, grid in enumerate(grids):
if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
grid.filename = None
grid._prepare_grid()
grid.proc_num = self.grid_procs[i,0]
- self.grids[gi] = grid
+ self.grids[i] = grid
mylog.debug("Prepared")
def _initialize_grid_arrays(self):
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -136,14 +136,25 @@
offset = 7
ii = na.argsort(self.grid_levels.flat)
gid = self._handle["/gid"][:]
+ first_ind = -(self.parameter_file.refine_by**self.parameter_file.dimensionality)
for g in self.grids[ii].flat:
gi = g.id - g._id_offset
# FLASH uses 1-indexed group info
- g.Children = [self.grids[i - 1] for i in gid[gi,7:] if i > -1]
+ g.Children = [self.grids[i - 1] for i in gid[gi,first_ind:] if i > -1]
for g1 in g.Children:
g1.Parent = g
g._prepare_grid()
g._setup_dx()
+ if self.parameter_file.dimensionality < 3:
+ DD = (self.parameter_file.domain_right_edge[2] -
+ self.parameter_file.domain_left_edge[2])
+ for g in self.grids:
+ g.dds[2] = DD
+ if self.parameter_file.dimensionality < 2:
+ DD = (self.parameter_file.domain_right_edge[1] -
+ self.parameter_file.domain_left_edge[1])
+ for g in self.grids:
+ g.dds[1] = DD
self.max_level = self.grid_levels.max()
def _setup_derived_fields(self):
@@ -183,7 +194,6 @@
# These should be explicitly obtained from the file, but for now that
# will wait until a reorganization of the source tree and better
# generalization.
- self.dimensionality = 3
self.refine_by = 2
self.parameters["HydroMethod"] = 'flash' # always PPM DE
self.parameters["Time"] = 1. # default unit is 1...
@@ -275,6 +285,8 @@
[self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
self.domain_right_edge = na.array(
[self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
+ self.dimensionality = self._find_parameter("integer", "dimensionality",
+ scalar = True)
# Determine domain dimensions
try:
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -139,9 +139,9 @@
my_plugin_name = ytcfg.get("yt","pluginfilename")
# We assume that it is with respect to the $HOME/.yt directory
if os.path.isfile(my_plugin_name):
- fn = my_plugin_name
+ _fn = my_plugin_name
else:
- fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
- if os.path.isfile(fn):
- mylog.info("Loading plugins from %s", fn)
- execfile(fn)
+ _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+ if os.path.isfile(_fn):
+ mylog.info("Loading plugins from %s", _fn)
+ execfile(_fn)
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -323,6 +323,8 @@
mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
my_communicator = communication_system.communicators[-1]
my_size = my_communicator.size
+ if njobs <= 0:
+ njobs = my_size
if njobs > my_size:
mylog.error("You have asked for %s jobs, but you only have %s processors.",
njobs, my_size)
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/utilities/parameter_file_storage.py
--- a/yt/utilities/parameter_file_storage.py
+++ b/yt/utilities/parameter_file_storage.py
@@ -62,6 +62,7 @@
_distributed = True
_processing = False
_owner = 0
+ _register = True
def __new__(cls, *p, **k):
self = object.__new__(cls, *p, **k)
@@ -74,6 +75,7 @@
Otherwise, use read-only settings.
"""
+ if self._register == False: return
if ytcfg.getboolean("yt", "StoreParameterFiles"):
self._read_only = False
self.init_db()
@@ -81,6 +83,7 @@
else:
self._read_only = True
self._records = {}
+ self._register = False
@parallel_simple_proxy
def init_db(self):
diff -r 28a59d6b883c5b6aa12aaeda3cecec26d3d32b87 -r bbf110ae0e32910ca44707643ee4946dae83b35e yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -400,7 +400,7 @@
if coord == None:
coord = center[axis]
if obj is None:
- if field_parameters == None: field_parameters = {}
+ if field_parameters is None: field_parameters = {}
obj = self.pf.hierarchy.slice(axis, coord, field,
center=center, **field_parameters)
p = self._add_plot(SlicePlot(
https://bitbucket.org/yt_analysis/yt/changeset/f3ee3630f3db/
changeset: f3ee3630f3db
branch: yt
user: MatthewTurk
date: 2011-12-02 17:13:59
summary: No-op merge because of dangling changeset
affected #: 0 files
https://bitbucket.org/yt_analysis/yt/changeset/47821321882b/
changeset: 47821321882b
branch: yt
user: MatthewTurk
date: 2011-12-20 13:53:51
summary: Adding "Name" to the static output min rep
affected #: 1 file
diff -r 6ed7167d3a34e4a2e7e2e6b7e8bb95970098b4e8 -r 47821321882b456358aa514e070836815d96daaf yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -76,11 +76,12 @@
"current_time", "domain_left_edge", "domain_right_edge",
"unique_identifier", "current_redshift", "output_hash",
"cosmological_simulation", "omega_matter", "omega_lambda",
- "hubble_constant")
+ "hubble_constant", "name")
def __init__(self, obj):
super(MinimalStaticOutput, self).__init__(obj)
self.output_hash = obj._hash()
+ self.name = str(obj)
def _generate_post(self):
metadata = self._attrs
https://bitbucket.org/yt_analysis/yt/changeset/70d12691846b/
changeset: 70d12691846b
branch: yt
user: MatthewTurk
date: 2011-12-20 21:57:00
summary: Merging from the mainline branch into config_reorg.
affected #: 51 files
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
import setuptools
-VERSION = "2.3dev"
+VERSION = "2.4dev"
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -32,6 +32,7 @@
import numpy as na
import random
import sys
+import os.path as path
from collections import defaultdict
from yt.funcs import *
@@ -1360,12 +1361,16 @@
# The halos are listed in order in the file.
lines = file("%s.txt" % self.basename)
locations = []
+ realpath = path.realpath("%s.txt" % self.basename)
for line in lines:
line = line.split()
# Prepend the hdf5 file names with the full path.
temp = []
for item in line[1:]:
- temp.append(self.pf.fullpath + '/' + item)
+ # This assumes that the .txt is in the same place as
+ # the h5 files, which is a good one I think.
+ item = item.split("/")
+ temp.append(path.join(path.dirname(realpath), item[-1]))
locations.append(temp)
lines.close()
return locations
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -86,6 +86,9 @@
"ChildHaloID3", "ChildHaloFrac3",
"ChildHaloID4", "ChildHaloFrac4"]
+NumNeighbors = 15
+NumDB = 5
+
class DatabaseFunctions(object):
# Common database functions so it doesn't have to be repeated.
def _open_database(self):
@@ -366,9 +369,9 @@
child_points = na.array(child_points)
fKD.pos = na.asfortranarray(child_points.T)
fKD.qv = na.empty(3, dtype='float64')
- fKD.dist = na.empty(5, dtype='float64')
- fKD.tags = na.empty(5, dtype='int64')
- fKD.nn = 5
+ fKD.dist = na.empty(NumNeighbors, dtype='float64')
+ fKD.tags = na.empty(NumNeighbors, dtype='int64')
+ fKD.nn = NumNeighbors
fKD.sort = True
fKD.rearrange = True
create_tree(0)
@@ -395,7 +398,7 @@
nIDs.append(n)
# We need to fill in fake halos if there aren't enough halos,
# which can happen at high redshifts.
- while len(nIDs) < 5:
+ while len(nIDs) < NumNeighbors:
nIDs.append(-1)
candidates[row[0]] = nIDs
@@ -405,12 +408,12 @@
self.candidates = candidates
# This stores the masses contributed to each child candidate.
- self.child_mass_arr = na.zeros(len(candidates)*5, dtype='float64')
+ self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
# Records where to put the entries in the above array.
self.child_mass_loc = defaultdict(dict)
for i,halo in enumerate(sorted(candidates)):
for j, child in enumerate(candidates[halo]):
- self.child_mass_loc[halo][child] = i*5 + j
+ self.child_mass_loc[halo][child] = i*NumNeighbors + j
def _build_h5_refs(self, filename):
# For this snapshot, add lists of file names that contain the
@@ -618,8 +621,8 @@
result = self.cursor.fetchone()
while result:
mass = result[0]
- self.child_mass_arr[mark:mark+5] /= mass
- mark += 5
+ self.child_mass_arr[mark:mark+NumNeighbors] /= mass
+ mark += NumNeighbors
result = self.cursor.fetchone()
# Get the global ID for the SnapHaloID=0 from the child, this will
@@ -642,14 +645,15 @@
# We need to get the GlobalHaloID for this child.
child_globalID = baseChildID + child
child_indexes.append(child_globalID)
- child_per.append(self.child_mass_arr[i*5 + j])
+ child_per.append(self.child_mass_arr[i*NumNeighbors + j])
# Sort by percentages, descending.
child_per, child_indexes = zip(*sorted(zip(child_per, child_indexes), reverse=True))
values = []
- for pair in zip(child_indexes, child_per):
+ for pair_count, pair in enumerate(zip(child_indexes, child_per)):
+ if pair_count == NumDB: break
values.extend([int(pair[0]), float(pair[1])])
#values.extend([parent_currt, parent_halo])
- # This has the child ID, child percent listed five times, followed
+ # This has the child ID, child percent listed NumDB times, followed
# by the currt and this parent halo ID (SnapHaloID).
#values = tuple(values)
self.write_values.append(values)
@@ -841,7 +845,7 @@
[1609, 0.0]]
"""
parents = []
- for i in range(5):
+ for i in range(NumDB):
string = "SELECT GlobalHaloID, ChildHaloFrac%d FROM Halos\
WHERE ChildHaloID%d=%d;" % (i, i, GlobalHaloID)
self.cursor.execute(string)
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,5 @@
from .multi_halo_profiler import \
HaloProfiler, \
FakeProfile, \
- shift_projections, \
+ get_halo_sphere, \
standard_fields
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -46,7 +46,8 @@
from yt.utilities.parallel_tools.parallel_analysis_interface import \
ParallelAnalysisInterface, \
parallel_blocking_call, \
- parallel_root_only
+ parallel_root_only, \
+ parallel_objects
from yt.visualization.fixed_resolution import \
FixedResolutionBuffer
from yt.visualization.image_writer import write_image
@@ -66,7 +67,7 @@
recenter = None,
profile_output_dir='radial_profiles', projection_output_dir='projections',
projection_width=8.0, projection_width_units='mpc', project_at_level='max',
- velocity_center=['bulk', 'halo'], filter_quantities=['id','center'],
+ velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'],
use_critical_density=False):
r"""Initialize a Halo Profiler object.
@@ -184,7 +185,6 @@
self._halo_filters = []
self.all_halos = []
self.filtered_halos = []
- self._projection_halo_list = []
# Create output directory if specified
if self.output_dir is not None:
@@ -351,7 +351,8 @@
"""
- self.profile_fields.append({'field':field, 'weight_field':weight_field, 'accumulation':accumulation})
+ self.profile_fields.append({'field':field, 'weight_field':weight_field,
+ 'accumulation':accumulation})
def add_projection(self, field, weight_field=None, cmap='algae'):
r"""Make a projection of the specified field.
@@ -453,7 +454,7 @@
# Profile all halos.
updated_halos = []
- for halo in self._get_objs('all_halos', round_robin=True):
+ for halo in parallel_objects(self.all_halos, -1):
# Apply prefilters to avoid profiling unwanted halos.
filter_result = True
haloQuantities = {}
@@ -509,7 +510,7 @@
def _get_halo_profile(self, halo, filename, virial_filter=True,
force_write=False):
- """Profile a single halo and write profile data to a file.
+ r"""Profile a single halo and write profile data to a file.
If file already exists, read profile data from file.
Return a dictionary of id, center, and virial quantities if virial_filter is True.
"""
@@ -527,39 +528,9 @@
mylog.error("Skipping halo with r_max / r_min = %f." % (halo['r_max']/r_min))
return None
- sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
- if len(sphere._grids) == 0: return None
- new_sphere = False
-
- if self.recenter:
- old = halo['center']
- if self.recenter in centering_registry:
- new_x, new_y, new_z = \
- centering_registry[self.recenter](sphere)
- else:
- # user supplied function
- new_x, new_y, new_z = self.recenter(sphere)
- if new_x < self.pf.domain_left_edge[0] or \
- new_y < self.pf.domain_left_edge[1] or \
- new_z < self.pf.domain_left_edge[2]:
- mylog.info("Recentering rejected, skipping halo %d" % \
- halo['id'])
- return None
- halo['center'] = [new_x, new_y, new_z]
- d = self.pf['kpc'] * periodic_dist(old, halo['center'],
- self.pf.domain_right_edge - self.pf.domain_left_edge)
- mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
- # Expand the halo to account for recentering.
- halo['r_max'] += d / 1000 # d is in kpc -> want mpc
- new_sphere = True
-
- if new_sphere:
- # Temporary solution to memory leak.
- for g in self.pf.h.grids:
- g.clear_data()
- sphere.clear_data()
- del sphere
- sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+ # get a sphere object to profile
+ sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+ if sphere is None: return None
if self._need_bulk_velocity:
# Set bulk velocity to zero out radial velocity profiles.
@@ -567,7 +538,9 @@
if self.velocity_center[1] == 'halo':
sphere.set_field_parameter('bulk_velocity', halo['velocity'])
elif self.velocity_center[1] == 'sphere':
- sphere.set_field_parameter('bulk_velocity', sphere.quantities['BulkVelocity'](lazy_reader=False, preload=False))
+ sphere.set_field_parameter('bulk_velocity',
+ sphere.quantities['BulkVelocity'](lazy_reader=False,
+ preload=False))
else:
mylog.error("Invalid parameter: VelocityCenter.")
elif self.velocity_center[0] == 'max':
@@ -645,18 +618,18 @@
# Get list of halos for projecting.
if halo_list == 'filtered':
- self._halo_projection_list = self.filtered_halos
+ halo_projection_list = self.filtered_halos
elif halo_list == 'all':
- self._halo_projection_list = self.all_halos
+ halo_projection_list = self.all_halos
elif isinstance(halo_list, types.StringType):
- self._halo_projection_list = self._read_halo_list(halo_list)
+ halo_projection_list = self._read_halo_list(halo_list)
elif isinstance(halo_list, types.ListType):
- self._halo_projection_list = halo_list
+ halo_projection_list = halo_list
else:
mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
return
- if len(self._halo_projection_list) == 0:
+ if len(halo_projection_list) == 0:
mylog.error("Halo list for projections is empty.")
return
@@ -665,7 +638,8 @@
proj_level = self.pf.h.max_level
else:
proj_level = int(self.project_at_level)
- proj_dx = self.pf.units[self.projection_width_units] / self.pf.parameters['TopGridDimensions'][0] / \
+ proj_dx = self.pf.units[self.projection_width_units] / \
+ self.pf.parameters['TopGridDimensions'][0] / \
(self.pf.parameters['RefineBy']**proj_level)
projectionResolution = int(self.projection_width / proj_dx)
@@ -678,21 +652,25 @@
my_output_dir = "%s/%s" % (self.pf.fullpath, self.projection_output_dir)
self.__check_directory(my_output_dir)
- center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] + self.pf.parameters['DomainRightEdge'][w])
+ center = [0.5 * (self.pf.parameters['DomainLeftEdge'][w] +
+ self.pf.parameters['DomainRightEdge'][w])
for w in range(self.pf.parameters['TopGridRank'])]
- for halo in self._get_objs('_halo_projection_list', round_robin=True):
+ for halo in parallel_objects(halo_projection_list, -1):
if halo is None:
continue
# Check if region will overlap domain edge.
# Using non-periodic regions is faster than using periodic ones.
- leftEdge = [(halo['center'][w] - 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+ leftEdge = [(halo['center'][w] -
+ 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
for w in range(len(halo['center']))]
- rightEdge = [(halo['center'][w] + 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
+ rightEdge = [(halo['center'][w] +
+ 0.5 * self.projection_width/self.pf.units[self.projection_width_units])
for w in range(len(halo['center']))]
mylog.info("Projecting halo %04d in region: [%f, %f, %f] to [%f, %f, %f]." %
- (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2], rightEdge[0], rightEdge[1], rightEdge[2]))
+ (halo['id'], leftEdge[0], leftEdge[1], leftEdge[2],
+ rightEdge[0], rightEdge[1], rightEdge[2]))
need_per = False
for w in range(len(halo['center'])):
@@ -719,13 +697,13 @@
for hp in self.projection_fields:
projections.append(self.pf.h.proj(w, hp['field'],
weight_field=hp['weight_field'],
- data_source=region, center=halo['center'],
+ source=region, center=halo['center'],
serialize=False))
# Set x and y limits, shift image if it overlaps domain boundary.
if need_per:
pw = self.projection_width/self.pf.units[self.projection_width_units]
- #shift_projections(self.pf, projections, halo['center'], center, w)
+ _shift_projections(self.pf, projections, halo['center'], center, w)
# Projection has now been shifted to center of box.
proj_left = [center[x_axis]-0.5*pw, center[y_axis]-0.5*pw]
proj_right = [center[x_axis]+0.5*pw, center[y_axis]+0.5*pw]
@@ -756,11 +734,85 @@
if save_images:
filename = "%s/Halo_%04d_%s_%s.png" % (my_output_dir, halo['id'],
dataset_name, axis_labels[w])
- write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+ if (frb[hp['field']] != 0).any():
+ write_image(na.log10(frb[hp['field']]), filename, cmap_name=hp['cmap'])
+ else:
+ mylog.info('Projection of %s for halo %d is all zeros, skipping image.' %
+ (hp['field'], halo['id']))
if save_cube: output.close()
del region
+ @parallel_blocking_call
+ def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
+ analysis_output_dir=None):
+ r"""Perform custom analysis on all halos.
+
+ This will loop through all halo on the HaloProfiler's list,
+ creating a sphere object for each halo and passing that sphere
+ to the provided analysis function.
+
+ Parameters
+ ----------
+ analysis_function : function
+ A function taking two arguments, the halo dictionary, and a
+ sphere object.
+ Example function to calculate total mass of halo:
+ def my_analysis(halo, sphere):
+ total_mass = sphere.quantities['TotalMass']()
+ print total_mass
+ halo_list : {'filtered', 'all'}
+ Which set of halos to make profiles of, either ones passed by the
+ halo filters (if enabled/added), or all halos.
+ Default='filtered'.
+ analysis_output_dir : string, optional
+ If specified, this directory will be created within the dataset to
+ contain any output from the analysis function. Default: None.
+
+ Examples
+ --------
+ >>> hp.analyze_halo_spheres(my_analysis, halo_list="filtered",
+ analysis_output_dir='special_analysis')
+
+ """
+
+ # Get list of halos for analysis.
+ if halo_list == 'filtered':
+ halo_analysis_list = self.filtered_halos
+ elif halo_list == 'all':
+ halo_analysis_list = self.all_halos
+ elif isinstance(halo_list, types.StringType):
+ halo_analysis_list = self._read_halo_list(halo_list)
+ elif isinstance(halo_list, types.ListType):
+ halo_analysis_list = halo_list
+ else:
+ mylog.error("Keyword, halo_list', must be 'filtered', 'all', a filename, or an actual list.")
+ return
+
+ if len(halo_analysis_list) == 0:
+ mylog.error("Halo list for analysis is empty.")
+ return
+
+ # Create output directory.
+ if analysis_output_dir is not None:
+ if self.output_dir is not None:
+ self.__check_directory("%s/%s" % (self.output_dir, self.pf.directory))
+ my_output_dir = "%s/%s/%s" % (self.output_dir, self.pf.directory,
+ analysis_output_dir)
+ else:
+ my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
+ self.__check_directory(my_output_dir)
+
+ for halo in parallel_objects(halo_analysis_list, -1):
+ if halo is None: continue
+
+ # Get a sphere object to analyze.
+ sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+ if sphere is None: continue
+
+ # Call the given analysis function.
+ analysis_function(halo, sphere)
+
def _add_actual_overdensity(self, profile):
"Calculate overdensity from TotalMassMsun and CellVolume fields."
@@ -917,7 +969,8 @@
def _run_hop(self, hop_file):
"Run hop to get halos."
- hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args, **self.halo_finder_kwargs)
+ hop_results = self.halo_finder_function(self.pf, *self.halo_finder_args,
+ **self.halo_finder_kwargs)
hop_results.write_out(hop_file)
del hop_results
@@ -989,7 +1042,95 @@
else:
os.mkdir(my_output_dir)
-def shift_projections(pf, projections, oldCenter, newCenter, axis):
+def get_halo_sphere(halo, pf, recenter=None):
+ r"""Returns a sphere object for a given halo.
+
+ With a dictionary containing halo properties, such as center
+ and r_max, this creates a sphere object and optionally
+ recenters and recreates the sphere using a recentering function.
+ This is to be used primarily to make spheres for a set of halos
+ loaded by the HaloProfiler.
+
+ Parameters
+ ----------
+ halo : dict, required
+ The dictionary containing halo properties used to make the sphere.
+ Required entries:
+ center : list with center coordinates.
+ r_max : sphere radius in Mpc.
+ pf : parameter file object, required
+ The parameter file from which the sphere will be made.
+ recenter : {None, string or function}
+ The exact location of the sphere center can significantly affect
+ radial profiles. The halo center loaded by the HaloProfiler will
+ typically be the dark matter center of mass calculated by a halo
+ finder. However, this may not be the best location for centering
+ profiles of baryon quantities. For example, one may want to center
+ on the maximum density.
+ If recenter is given as a string, one of the existing recentering
+ functions will be used:
+ Min_Dark_Matter_Density : location of minimum dark matter density
+ Max_Dark_Matter_Density : location of maximum dark matter density
+ CoM_Dark_Matter_Density : dark matter center of mass
+ Min_Gas_Density : location of minimum gas density
+ Max_Gas_Density : location of maximum gas density
+ CoM_Gas_Density : gas center of mass
+ Min_Total_Density : location of minimum total density
+ Max_Total_Density : location of maximum total density
+ CoM_Total_Density : total center of mass
+ Min_Temperature : location of minimum temperature
+ Max_Temperature : location of maximum temperature
+ Alternately, a function can be supplied for custom recentering.
+ The function should take only one argument, a sphere object.
+ Example function:
+ def my_center_of_mass(data):
+ my_x, my_y, my_z = data.quantities['CenterOfMass']()
+ return (my_x, my_y, my_z)
+
+ Examples: this should primarily be used with the halo list of the HaloProfiler.
+ This is an example with an abstract halo assuming a pre-defined pf.
+ >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
+ >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
+ >>> # Assuming the above example function has been defined.
+ >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
+ """
+
+ sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+ if len(sphere._grids) == 0: return None
+ new_sphere = False
+
+ if recenter:
+ old = halo['center']
+ if recenter in centering_registry:
+ new_x, new_y, new_z = \
+ centering_registry[recenter](sphere)
+ else:
+ # user supplied function
+ new_x, new_y, new_z = recenter(sphere)
+ if new_x < pf.domain_left_edge[0] or \
+ new_y < pf.domain_left_edge[1] or \
+ new_z < pf.domain_left_edge[2]:
+ mylog.info("Recentering rejected, skipping halo %d" % \
+ halo['id'])
+ return None
+ halo['center'] = [new_x, new_y, new_z]
+ d = pf['kpc'] * periodic_dist(old, halo['center'],
+ pf.domain_right_edge - pf.domain_left_edge)
+ mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+ # Expand the halo to account for recentering.
+ halo['r_max'] += d / 1000 # d is in kpc -> want mpc
+ new_sphere = True
+
+ if new_sphere:
+ # Temporary solution to memory leak.
+ for g in pf.h.grids:
+ g.clear_data()
+ sphere.clear_data()
+ del sphere
+ sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
+ return sphere
+
+def _shift_projections(pf, projections, oldCenter, newCenter, axis):
"""
Shift projection data around.
This is necessary when projecting a periodic region.
@@ -1059,14 +1200,19 @@
add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
# Add the hanging cells back to the projection data.
- plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
- plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
- plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
- plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
- plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+ plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px,
+ add2_x_px, add2_y_px])
+ plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py,
+ add2_x_py, add2_y_py])
+ plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx,
+ add2_x_pdx, add2_y_pdx])
+ plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy,
+ add2_x_pdy, add2_y_pdy])
+ plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field,
+ add2_x_field, add2_y_field])
plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
- add_x_weight_field, add_y_weight_field,
- add2_x_weight_field, add2_y_weight_field])
+ add_x_weight_field, add_y_weight_field,
+ add2_x_weight_field, add2_y_weight_field])
# Delete original copies of hanging cells.
del add_x_px, add_y_px, add2_x_px, add2_y_px
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -96,6 +96,8 @@
self._pf.current_redshift) # seconds
# Build the distribution.
self.build_dist()
+ # Attach some convenience arrays.
+ self.attach_arrays()
def build_dist(self):
"""
@@ -127,6 +129,47 @@
# We will want the time taken between bins.
self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1]
+ def attach_arrays(self):
+ """
+ Attach convenience arrays to the class for easy access.
+ """
+ if self.mode == 'data_source':
+ try:
+ vol = self._data_source.volume('mpc')
+ except AttributeError:
+ # If we're here, this is probably a HOPHalo object, and we
+ # can get the volume this way.
+ ds = self._data_source.get_sphere()
+ vol = ds.volume('mpc')
+ elif self.mode == 'provided':
+ vol = self.volume
+ tc = self._pf["Time"]
+ self.time = []
+ self.lookback_time = []
+ self.redshift = []
+ self.Msol_yr = []
+ self.Msol_yr_vol = []
+ self.Msol = []
+ self.Msol_cumulative = []
+ # Use the center of the time_bin, not the left edge.
+ for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+ self.time.append(time * tc / YEAR)
+ self.lookback_time.append((self.time_now - time * tc)/YEAR)
+ self.redshift.append(self.cosm.ComputeRedshiftFromTime(time * tc))
+ self.Msol_yr.append(self.mass_bins[i] / \
+ (self.time_bins_dt[i] * tc / YEAR))
+ self.Msol_yr_vol.append(self.mass_bins[i] / \
+ (self.time_bins_dt[i] * tc / YEAR) / vol)
+ self.Msol.append(self.mass_bins[i])
+ self.Msol_cumulative.append(self.cum_mass_bins[i])
+ self.time = na.array(self.time)
+ self.lookback_time = na.array(self.lookback_time)
+ self.redshift = na.array(self.redshift)
+ self.Msol_yr = na.array(self.Msol_yr)
+ self.Msol_yr_vol = na.array(self.Msol_yr_vol)
+ self.Msol = na.array(self.Msol)
+ self.Msol_cumulative = na.array(self.Msol_cumulative)
+
def write_out(self, name="StarFormationRate.out"):
r"""Write out the star analysis to a text file *name*. The columns are in
order.
@@ -150,31 +193,21 @@
>>> sfr.write_out("stars-SFR.out")
"""
fp = open(name, "w")
- if self.mode == 'data_source':
- try:
- vol = self._data_source.volume('mpc')
- except AttributeError:
- # If we're here, this is probably a HOPHalo object, and we
- # can get the volume this way.
- ds = self._data_source.get_sphere()
- vol = ds.volume('mpc')
- elif self.mode == 'provided':
- vol = self.volume
- tc = self._pf["Time"]
- # Use the center of the time_bin, not the left edge.
fp.write("#time\tlookback\tredshift\tMsol/yr\tMsol/yr/Mpc3\tMsol\tcumMsol\t\n")
- for i, time in enumerate((self.time_bins[1:] + self.time_bins[:-1])/2.):
+ for i, time in enumerate(self.time):
line = "%1.5e %1.5e %1.5e %1.5e %1.5e %1.5e %1.5e\n" % \
- (time * tc / YEAR, # Time
- (self.time_now - time * tc)/YEAR, # Lookback time
- self.cosm.ComputeRedshiftFromTime(time * tc), # Redshift
- self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR), # Msol/yr
- self.mass_bins[i] / (self.time_bins_dt[i] * tc / YEAR) / vol, # Msol/yr/vol
- self.mass_bins[i], # Msol in bin
- self.cum_mass_bins[i]) # cumulative
+ (time, # Time
+ self.lookback_time[i], # Lookback time
+ self.redshift[i], # Redshift
+ self.Msol_yr[i], # Msol/yr
+ self.Msol_yr_vol[i], # Msol/yr/vol
+ self.Msol[i], # Msol in bin
+ self.Msol_cumulative[i]) # cumulative
fp.write(line)
fp.close()
+### Begin Synthetic Spectrum Stuff. ####
+
CHABRIER = {
"Z0001" : "bc2003_hr_m22_chab_ssp.ised.h5", #/* 0.5% */
"Z0004" : "bc2003_hr_m32_chab_ssp.ised.h5", #/* 2% */
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -53,6 +53,8 @@
TrilinearFieldInterpolator
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.minimal_representation import \
+ MinimalProjectionData
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
@@ -88,6 +90,20 @@
return tr
return save_state
+def restore_field_information_state(func):
+ """
+ A decorator that takes a function with the API of (self, grid, field)
+ and ensures that after the function is called, the field_parameters will
+ be returned to normal.
+ """
+ def save_state(self, grid, field=None, *args, **kwargs):
+ old_params = grid.field_parameters
+ grid.field_parameters = self.field_parameters
+ tr = func(self, grid, field, *args, **kwargs)
+ grid.field_parameters = old_params
+ return tr
+ return save_state
+
def cache_mask(func):
"""
For computationally intensive indexing operations, we can cache
@@ -833,6 +849,38 @@
self[field] = temp_data[field]
def to_frb(self, width, resolution, center = None):
+ r"""This function returns a FixedResolutionBuffer generated from this
+ object.
+
+ A FixedResolutionBuffer is an object that accepts a variable-resolution
+ 2D object and transforms it into an NxM bitmap that can be plotted,
+ examined or processed. This is a convenience function to return an FRB
+ directly from an existing 2D data object.
+
+ Parameters
+ ----------
+ width : width specifier
+ This can either be a floating point value, in the native domain
+ units of the simulation, or a tuple of the (value, unit) style.
+ This will be the width of the FRB.
+ resolution : int or tuple of ints
+ The number of pixels on a side of the final FRB.
+ center : array-like of floats, optional
+ The center of the FRB. If not specified, defaults to the center of
+ the current object.
+
+ Returns
+ -------
+ frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
+ A fixed resolution buffer, which can be queried for fields.
+
+ Examples
+ --------
+
+ >>> proj = pf.h.proj(0, "Density")
+ >>> frb = proj.to_frb( (100.0, 'kpc'), 1024)
+ >>> write_image(na.log10(frb["Density"]), 'density_100kpc.png')
+ """
if center is None:
center = self.get_field_parameter("center")
if center is None:
@@ -1258,6 +1306,52 @@
return "%s/c%s_L%s" % \
(self._top_node, cen_name, L_name)
+ def to_frb(self, width, resolution):
+ r"""This function returns an ObliqueFixedResolutionBuffer generated
+ from this object.
+
+ An ObliqueFixedResolutionBuffer is an object that accepts a
+ variable-resolution 2D object and transforms it into an NxM bitmap that
+ can be plotted, examined or processed. This is a convenience function
+ to return an FRB directly from an existing 2D data object. Unlike the
+ corresponding to_frb function for other AMR2DData objects, this does
+ not accept a 'center' parameter as it is assumed to be centered at the
+ center of the cutting plane.
+
+ Parameters
+ ----------
+ width : width specifier
+ This can either be a floating point value, in the native domain
+ units of the simulation, or a tuple of the (value, unit) style.
+ This will be the width of the FRB.
+ resolution : int or tuple of ints
+ The number of pixels on a side of the final FRB.
+
+ Returns
+ -------
+ frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
+ A fixed resolution buffer, which can be queried for fields.
+
+ Examples
+ --------
+
+ >>> v, c = pf.h.find_max("Density")
+ >>> sp = pf.h.sphere(c, (100.0, 'au'))
+ >>> L = sp.quantities["AngularMomentumVector"]()
+ >>> cutting = pf.h.cutting(L, c)
+ >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
+ >>> write_image(na.log10(frb["Density"]), 'density_1pc.png')
+ """
+ if iterable(width):
+ w, u = width
+ width = w/self.pf[u]
+ if not iterable(resolution):
+ resolution = (resolution, resolution)
+ from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+ bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+ frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+ return frb
+
class AMRFixedResCuttingPlaneBase(AMR2DData):
"""
AMRFixedResCuttingPlaneBase is an oblique plane through the data,
@@ -1553,6 +1647,10 @@
self._refresh_data()
if self._okay_to_serialize and self.serialize: self._serialize(node_name=self._node_name)
+ @property
+ def _mrep(self):
+ return MinimalProjectionData(self)
+
def _convert_field_name(self, field):
if field == "weight_field": return "weight_field_%s" % self._weight
if field in self._key_fields: return field
@@ -3467,7 +3565,7 @@
output_field, output_left)
self.field_data[field] = output_field
- @restore_grid_state
+ @restore_field_information_state
def _get_data_from_grid(self, grid, fields):
fields = ensure_list(fields)
g_fields = [grid[field].astype("float64") for field in fields]
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -133,7 +133,6 @@
if weight:
f[u] /= w[u]
self[field] = f
- self["myweight"] = w
self["UsedBins"] = u
def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
output_type_registry
from yt.data_objects.field_info_container import \
FieldInfoContainer, NullFunc
+from yt.utilities.minimal_representation import \
+ MinimalStaticOutput
# We want to support the movie format in the future.
# When such a thing comes to pass, I'll move all the stuff that is constant up
@@ -115,6 +117,10 @@
except ImportError:
return s.replace(";", "*")
+ @property
+ def _mrep(self):
+ return MinimalStaticOutput(self)
+
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -296,11 +296,12 @@
def _dmpdensity(field, data):
blank = na.zeros(data.ActiveDimensions, dtype='float32')
if data.NumberOfParticles == 0: return blank
- if 'creation_time' in data.keys():
+ if 'creation_time' in data.pf.field_info:
filter = data['creation_time'] <= 0.0
if not filter.any(): return blank
else:
filter = na.ones(data.NumberOfParticles, dtype='bool')
+ if not filter.any(): return blank
amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(na.float64),
data["particle_position_y"][filter].astype(na.float64),
data["particle_position_z"][filter].astype(na.float64),
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -265,6 +265,7 @@
def _find_parameter(self, ptype, pname, scalar = False):
nn = "/%s %s" % (ptype,
{False: "runtime parameters", True: "scalars"}[scalar])
+ if nn not in self._handle: raise KeyError(nn)
for tpname, pval in self._handle[nn][:]:
if tpname.strip() == pname:
return pval
@@ -285,20 +286,26 @@
[self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
self.domain_right_edge = na.array(
[self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
- self.dimensionality = self._find_parameter("integer", "dimensionality",
- scalar = True)
# Determine domain dimensions
try:
nxb = self._find_parameter("integer", "nxb", scalar = True)
nyb = self._find_parameter("integer", "nyb", scalar = True)
nzb = self._find_parameter("integer", "nzb", scalar = True)
+ dimensionality = self._find_parameter("integer", "dimensionality",
+ scalar = True)
except KeyError:
nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
for ax in 'xyz']
+ dimensionality = 3
+ if nzb == 1: dimensionality = 2
+ if nyb == 1: dimensionality = 1
+ if dimensionality < 3:
+ mylog.warning("Guessing dimensionality as %s", dimensionality)
nblockx = self._find_parameter("integer", "nblockx")
nblocky = self._find_parameter("integer", "nblocky")
nblockz = self._find_parameter("integer", "nblockz")
+ self.dimensionality = dimensionality
self.domain_dimensions = \
na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
@@ -309,6 +316,13 @@
self.current_time = \
float(self._find_parameter("real", "time", scalar=True))
+ if self._flash_version == 7:
+ self.parameters['timestep'] = float(
+ self._handle["simulation parameters"]["timestep"])
+ else:
+ self.parameters['timestep'] = \
+ float(self._find_parameter("real", "dt", scalar=True))
+
try:
use_cosmo = self._find_parameter("logical", "usecosmology")
except:
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/3d.png
Binary file yt/gui/reason/html/images/3d.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/3d_tab.png
Binary file yt/gui/reason/html/images/3d_tab.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/binary.png
Binary file yt/gui/reason/html/images/binary.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/blockdevice.png
Binary file yt/gui/reason/html/images/blockdevice.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/blockdevice_tab.png
Binary file yt/gui/reason/html/images/blockdevice_tab.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/console.png
Binary file yt/gui/reason/html/images/console.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_down.png
Binary file yt/gui/reason/html/images/double_down.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_down_sm.png
Binary file yt/gui/reason/html/images/double_down_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_left.png
Binary file yt/gui/reason/html/images/double_left.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_left_sm.png
Binary file yt/gui/reason/html/images/double_left_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_right.png
Binary file yt/gui/reason/html/images/double_right.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_right_sm.png
Binary file yt/gui/reason/html/images/double_right_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_up.png
Binary file yt/gui/reason/html/images/double_up.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/double_up_sm.png
Binary file yt/gui/reason/html/images/double_up_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/graph.png
Binary file yt/gui/reason/html/images/graph.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/kivio_flw.png
Binary file yt/gui/reason/html/images/kivio_flw.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_down.png
Binary file yt/gui/reason/html/images/single_down.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_down_sm.png
Binary file yt/gui/reason/html/images/single_down_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_left.png
Binary file yt/gui/reason/html/images/single_left.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_left_sm.png
Binary file yt/gui/reason/html/images/single_left_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_right.png
Binary file yt/gui/reason/html/images/single_right.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_right_sm.png
Binary file yt/gui/reason/html/images/single_right_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_up.png
Binary file yt/gui/reason/html/images/single_up.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/single_up_sm.png
Binary file yt/gui/reason/html/images/single_up_sm.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/images/upload.png
Binary file yt/gui/reason/html/images/upload.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/leaflet/images/marker-shadow.png
Binary file yt/gui/reason/html/leaflet/images/marker-shadow.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/leaflet/images/marker.png
Binary file yt/gui/reason/html/leaflet/images/marker.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/leaflet/images/popup-close.png
Binary file yt/gui/reason/html/leaflet/images/popup-close.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/leaflet/images/zoom-in.png
Binary file yt/gui/reason/html/leaflet/images/zoom-in.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/gui/reason/html/leaflet/images/zoom-out.png
Binary file yt/gui/reason/html/leaflet/images/zoom-out.png has changed
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
PlotCollection, PlotCollectionInteractive, \
get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
callback_registry, write_bitmap, write_image, annotate_image, \
- apply_colormap, scale_image
+ apply_colormap, scale_image, write_projection
from yt.visualization.volume_rendering.api import \
ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
@@ -128,6 +128,10 @@
from yt.convenience import all_pfs, max_spheres, load, projload
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+ ortho_find, quartiles
+
# We load plugins. Keep in mind, this can be fairly dangerous -
# the primary purpose is to allow people to have a set of functions
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -65,6 +65,9 @@
double log2(double x)
long int lrint(double x)
double fabs(double x)
+ double cos(double x)
+ double sin(double x)
+ double asin(double x)
cdef struct Triangle:
Triangle *next
@@ -238,6 +241,33 @@
tr[i] = ipnest
return tr
+def arr_fisheye_vectors(int resolution, np.float64_t fov):
+ # We now follow figures 4-7 of:
+ # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+ # ...but all in Cython.
+ cdef np.ndarray[np.float64_t, ndim=3] vp
+ cdef int i, j, k
+ cdef np.float64_t r, phi, theta, px, py
+ cdef np.float64_t pi = 3.1415926
+ cdef np.float64_t fov_rad = fov * pi / 180.0
+ vp = np.zeros((resolution, resolution, 3), dtype="float64")
+ for i in range(resolution):
+ px = 2.0 * i / (resolution) - 1.0
+ for j in range(resolution):
+ py = 2.0 * j / (resolution) - 1.0
+ r = (px*px + py*py)**0.5
+ if r == 0.0:
+ phi = 0.0
+ elif px < 0:
+ phi = pi - asin(py / r)
+ else:
+ phi = asin(py / r)
+ theta = r * fov_rad / 2.0
+ vp[i,j,0] = sin(theta) * cos(phi)
+ vp[i,j,1] = sin(theta) * sin(phi)
+ vp[i,j,2] = cos(theta)
+ return vp
+
cdef class star_kdtree_container:
cdef kdtree_utils.kdtree *tree
cdef public np.float64_t sigma
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -280,6 +280,105 @@
)
+def _update_hg(path, skip_rebuild = False):
+ from mercurial import hg, ui, commands
+ f = open(os.path.join(path, "yt_updater.log"), "a")
+ u = ui.ui()
+ u.pushbuffer()
+ config_fn = os.path.join(path, ".hg", "hgrc")
+ print "Reading configuration from ", config_fn
+ u.readconfig(config_fn)
+ repo = hg.repository(u, path)
+ commands.pull(u, repo)
+ f.write(u.popbuffer())
+ f.write("\n\n")
+ u.pushbuffer()
+ commands.identify(u, repo)
+ if "+" in u.popbuffer():
+ print "Can't rebuild modules by myself."
+ print "You will have to do this yourself. Here are some sample commands:"
+ print
+ print " $ cd %s" % (path)
+ print " $ hg up"
+ print " $ %s setup.py develop" % (sys.executable)
+ return 1
+ print "Updating the repository"
+ f.write("Updating the repository\n\n")
+ commands.update(u, repo, check=True)
+ if skip_rebuild: return
+ f.write("Rebuilding modules\n\n")
+ p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
+ stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ stdout, stderr = p.communicate()
+ f.write(stdout)
+ f.write("\n\n")
+ if p.returncode:
+ print "BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))
+ sys.exit(1)
+ f.write("Successful!\n")
+ print "Updated successfully."
+
+def _get_hg_version(path):
+ from mercurial import hg, ui, commands
+ u = ui.ui()
+ u.pushbuffer()
+ repo = hg.repository(u, path)
+ commands.identify(u, repo)
+ return u.popbuffer()
+
+def get_yt_version():
+ import pkg_resources
+ yt_provider = pkg_resources.get_provider("yt")
+ path = os.path.dirname(yt_provider.module_path)
+ version = _get_hg_version(path)[:12]
+ return version
+
+# This code snippet is modified from Georg Brandl
+def bb_apicall(endpoint, data, use_pass = True):
+ uri = 'https://api.bitbucket.org/1.0/%s/' % endpoint
+ # since bitbucket doesn't return the required WWW-Authenticate header when
+ # making a request without Authorization, we cannot use the standard urllib2
+ # auth handlers; we have to add the requisite header from the start
+ if data is not None:
+ data = urllib.urlencode(data)
+ req = urllib2.Request(uri, data)
+ if use_pass:
+ username = raw_input("Bitbucket Username? ")
+ password = getpass.getpass()
+ upw = '%s:%s' % (username, password)
+ req.add_header('Authorization', 'Basic %s' % base64.b64encode(upw).strip())
+ return urllib2.urlopen(req).read()
+
+def _get_yt_supp(uu):
+ supp_path = os.path.join(os.environ["YT_DEST"], "src",
+ "yt-supplemental")
+ # Now we check that the supplemental repository is checked out.
+ from mercurial import hg, ui, commands
+ if not os.path.isdir(supp_path):
+ print
+ print "*** The yt-supplemental repository is not checked ***"
+ print "*** out. I can do this for you, but because this ***"
+ print "*** is a delicate act, I require you to respond ***"
+ print "*** to the prompt with the word 'yes'. ***"
+ print
+ response = raw_input("Do you want me to try to check it out? ")
+ if response != "yes":
+ print
+ print "Okay, I understand. You can check it out yourself."
+ print "This command will do it:"
+ print
+ print "$ hg clone http://hg.yt-project.org/yt-supplemental/ ",
+ print "%s" % (supp_path)
+ print
+ sys.exit(1)
+ rv = commands.clone(uu,
+ "http://hg.yt-project.org/yt-supplemental/", supp_path)
+ if rv:
+ print "Something has gone wrong. Quitting."
+ sys.exit(1)
+ # Now we think we have our supplemental repository.
+ return supp_path
+
class YTUpdateCmd(YTCommand):
name = "update"
@@ -840,7 +939,7 @@
print "*** to point to the installation location! ***"
print
sys.exit(1)
- supp_path = _get_yt_supp()
+ supp_path = _get_yt_supp(uu)
print
print "I have found the yt-supplemental repository at %s" % (supp_path)
print
@@ -1131,7 +1230,8 @@
import imp
from mercurial import hg, ui, commands, error, config
uri = "http://hub.yt-project.org/3rdparty/API/api.php"
- supp_path = _get_yt_supp()
+ uu = ui.ui()
+ supp_path = _get_yt_supp(uu)
try:
result = imp.find_module("cedit", [supp_path])
except ImportError:
@@ -1148,7 +1248,6 @@
print "Sorry, but I'm going to bail."
sys.exit(1)
hgbb = imp.load_module("hgbb", *result)
- uu = ui.ui()
try:
repo = hg.repository(uu, args.repo)
conf = config.config()
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -515,3 +515,107 @@
vec2 /= norm2
vec3 = na.cross(vec1, vec2)
return vec1, vec2, vec3
+
+def quartiles(a, axis=None, out=None, overwrite_input=False):
+ """
+ Compute the quartile values (25% and 75%) along the specified axis
+ in the same way that the numpy.median calculates the median (50%) value
+ along a specified axis. Check numpy.median for details, as it is
+ virtually the same algorithm.
+
+ Returns an array of the quartiles of the array elements [lower quartile,
+ upper quartile].
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : {None, int}, optional
+ Axis along which the quartiles are computed. The default (axis=None)
+ is to compute the quartiles along a flattened version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : {False, True}, optional
+ If True, then allow use of memory of input array (a) for
+ calculations. The input array will be modified by the call to
+ quartiles. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. Note that, if `overwrite_input` is True and the input
+ is not already an ndarray, an error will be raised.
+
+ Returns
+ -------
+ quartiles : ndarray
+ A new 2D array holding the result (unless `out` is specified, in
+ which case that array is returned instead). If the input contains
+ integers, or floats of smaller precision than 64, then the output
+ data-type is float64. Otherwise, the output data-type is the same
+ as that of the input.
+
+ See Also
+ --------
+ numpy.median, numpy.mean, numpy.percentile
+
+ Notes
+ -----
+ Given a vector V of length N, the quartiles of V are the 25% and 75% values
+ of a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/4]`` and
+ ``V_sorted[3*(N-1)/4]``, when N is odd. When N is even, it is the average
+ of the two values bounding these values of ``V_sorted``.
+
+ Examples
+ --------
+ >>> a = na.arange(100).reshape(10,10)
+ >>> a
+ array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+ [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
+ [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
+ [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
+ [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
+ [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
+ [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
+ [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
+ >>> mu.quartiles(a)
+ array([ 24.5, 74.5])
+ >>> mu.quartiles(a,axis=0)
+ array([[ 15., 16., 17., 18., 19., 20., 21., 22., 23., 24.],
+ [ 65., 66., 67., 68., 69., 70., 71., 72., 73., 74.]])
+ >>> mu.quartiles(a,axis=1)
+ array([[ 1.5, 11.5, 21.5, 31.5, 41.5, 51.5, 61.5, 71.5, 81.5,
+ 91.5],
+ [ 6.5, 16.5, 26.5, 36.5, 46.5, 56.5, 66.5, 76.5, 86.5,
+ 96.5]])
+ """
+ if overwrite_input:
+ if axis is None:
+ sorted = a.ravel()
+ sorted.sort()
+ else:
+ a.sort(axis=axis)
+ sorted = a
+ else:
+ sorted = na.sort(a, axis=axis)
+ if axis is None:
+ axis = 0
+ indexer = [slice(None)] * sorted.ndim
+ indices = [int(sorted.shape[axis]/4), int(sorted.shape[axis]*.75)]
+ result = []
+ for index in indices:
+ if sorted.shape[axis] % 2 == 1:
+ # index with slice to allow mean (below) to work
+ indexer[axis] = slice(index, index+1)
+ else:
+ indexer[axis] = slice(index-1, index+1)
+ # special cases for small arrays
+ if sorted.shape[axis] == 2:
+ # index with slice to allow mean (below) to work
+ indexer[axis] = slice(index, index+1)
+ # Use mean in odd and even case to coerce data type
+ # and check, use out array.
+ result.append(na.mean(sorted[indexer], axis=axis, out=out))
+ return na.array(result)
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/utilities/minimal_representation.py
--- /dev/null
+++ b/yt/utilities/minimal_representation.py
@@ -0,0 +1,106 @@
+"""
+Skeleton objects that represent a few fundamental yt data types.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2011 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import abc
+
+class ContainerClass(object):
+ pass
+
+class MinimalRepresentation(object):
+ __metaclass__ = abc.ABCMeta
+
+ def _update_attrs(self, obj, attr_list):
+ for attr in attr_list:
+ setattr(self, attr, getattr(obj, attr, None))
+ if hasattr(obj, "pf"):
+ self.output_hash = obj.pf._hash()
+
+ def __init__(self, obj):
+ self._update_attrs(obj, self._attr_list)
+
+ @abc.abstractmethod
+ def _generate_post(self):
+ pass
+
+ @abc.abstractproperty
+ def _attr_list(self):
+ pass
+
+ def _return_filtered_object(self, attrs):
+ new_attrs = tuple(attr for attr in self._attr_list
+ if attr not in attrs)
+ new_class = type('Filtered%s' % self.__class__.__name__,
+ (FilteredRepresentation,),
+ {'_attr_list': new_attrs})
+ return new_class(self)
+
+ @property
+ def _attrs(self):
+ return dict( ((attr, getattr(self, attr)) for attr in self._attr_list) )
+
+ @classmethod
+ def _from_metadata(cls, metadata):
+ cc = ContainerClass()
+ for a, v in metadata.items():
+ setattr(cc, a, v)
+ return cls(cc)
+
+class FilteredRepresentation(MinimalRepresentation):
+ def _generate_post(self):
+ raise RuntimeError
+
+class MinimalStaticOutput(MinimalRepresentation):
+ _attr_list = ("dimensionality", "refine_by", "domain_dimensions",
+ "current_time", "domain_left_edge", "domain_right_edge",
+ "unique_identifier", "current_redshift", "output_hash",
+ "cosmological_simulation", "omega_matter", "omega_lambda",
+ "hubble_constant", "name")
+
+ def __init__(self, obj):
+ super(MinimalStaticOutput, self).__init__(obj)
+ self.output_hash = obj._hash()
+ self.name = str(obj)
+
+ def _generate_post(self):
+ metadata = self._attrs
+ chunks = []
+ return metadata, chunks
+
+class MinimalMappableData(MinimalRepresentation):
+
+ weight = "None"
+ _attr_list = ("field_data", "field", "weight", "axis", "output_hash")
+
+ def _generate_post(self):
+ nobj = self._return_filtered_object(("field_data",))
+ metadata = nobj._attrs
+ chunks = [(arr, self.field_data[arr]) for arr in self.field_data]
+ return (metadata, chunks)
+
+class MinimalProjectionData(MinimalMappableData):
+
+ def __init__(self, obj):
+ super(MinimalProjectionData, self).__init__(obj)
+ self.type = "proj"
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -49,7 +49,8 @@
splat_points, \
annotate_image, \
apply_colormap, \
- scale_image
+ scale_image, \
+ write_projection
from plot_modifications import \
PlotCallback, \
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -327,3 +327,87 @@
im = image.copy()
au.add_points_to_image(im, points_x, points_y, val)
return im
+
+def write_projection(data, filename, colorbar=True, colorbar_label=None,
+ title=None, limits=None, take_log=True, var_fig_size=False):
+ r"""Write a projection or volume rendering to disk with a variety of
+ pretty parameters such as limits, title, colorbar, etc. write_projection
+ uses the standard matplotlib interface to create the figure. N.B. This code
+ only works *after* you have created the projection using the standard
+ framework (i.e. the Camera interface or off_axis_projection).
+
+ Accepts an NxM sized array representing the projection itself as well
+ as the filename to which you will save this figure.
+
+ Parameters
+ ----------
+ data : array_like
+ image array as output by off_axis_projection or camera.snapshot()
+ filename : string
+ the filename where the data will be saved
+ colorbar : boolean
+ do you want a colorbar generated to the right of the image?
+ colorbar_label : string
+ the label associated with your colorbar
+ title : string
+ the label at the top of the figure
+ limits : 2-element array_like
+ the lower limit and the upper limit to be plotted in the figure
+ of the data array
+ take_log : boolean
+ plot the log of the data array (and take the log of the limits if set)?
+ var_fig_size : boolean
+ If we want the resolution (and size) of the output image to scale
+ with the resolution of the image array.
+
+ Examples
+ --------
+
+ >>> image = off_axis_projection(pf, c, L, W, N, "Density", no_ghost=False)
+ >>> write_projection(image, 'test.png',
+ colorbar_label="Column Density (cm$^{-2}$)",
+ title="Offaxis Projection", limits=(1e-5,1e-3),
+ take_log=True)
+ """
+ import pylab as pl
+
+ # If this is rendered as log, then apply now.
+ if take_log:
+ data = na.log10(data)
+ if limits is not None:
+ limits = na.log10(limits)
+
+
+ # Create the figure and paint the data on
+ fig = pl.figure()
+ ax = fig.add_subplot(111)
+
+ if limits is not None:
+ cax = ax.imshow(data, vmin=limits[0], vmax=limits[1])
+ else:
+ cax = ax.imshow(data)
+
+ if title:
+ ax.set_title(title)
+
+ # Suppress the x and y pixel counts
+ ax.set_xticks(())
+ ax.set_yticks(())
+
+ # Add a color bar and label if requested
+ if colorbar:
+ cbar = fig.colorbar(cax)
+ if colorbar_label:
+ cbar.ax.set_ylabel(colorbar_label)
+
+ # If we want the resolution of the image to scale with the resolution
+ # of the image array. we increase the dpi value accordingly
+ if var_fig_size:
+ N = data.shape[0]
+ mag_factor = N/480.
+ pl.savefig(filename, dpi=100*mag_factor)
+ else:
+ pl.savefig(filename)
+
+ pl.clf()
+ pl.close()
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -211,9 +211,9 @@
Only ONE of the following options can be specified. If all 3 are
specified, they will be used in the following precedence order:
- * `ticks` - a list of floating point numbers at which to put ticks
- * `minmaxtick` - display DEFAULT ticks with min & max also displayed
- * `nticks` - if ticks not specified, can automatically determine a
+ * ``ticks`` - a list of floating point numbers at which to put ticks
+ * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+ * ``nticks`` - if ticks not specified, can automatically determine a
number of ticks to be evenly spaced in log space
"""
for plot in self.plots:
@@ -1713,9 +1713,9 @@
r"""Construct a multiple axes plot object, with or without a colorbar, into
which multiple plots may be inserted.
- This will create a set of `matplotlib.axes.Axes`, all lined up into a grid,
- which are then returned to the user and which can be used to plot multiple
- plots on a single figure.
+ This will create a set of :class:`matplotlib.axes.Axes`, all lined up into
+ a grid, which are then returned to the user and which can be used to plot
+ multiple plots on a single figure.
Parameters
----------
@@ -1733,12 +1733,12 @@
Returns
-------
- fig : `matplotlib.figure.Figure
+ fig : :class:`matplotlib.figure.Figure`
The figure created inside which the axes reside
- tr : list of list of `matplotlib.axes.Axes` objects
+ tr : list of list of :class:`matplotlib.axes.Axes` objects
This is a list, where the inner list is along the x-axis and the outer
is along the y-axis
- cbars : list of `matplotlib.axes.Axes` objects
+ cbars : list of :class:`matplotlib.axes.Axes` objects
Each of these is an axes onto which a colorbar can be placed.
Notes
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -170,10 +170,11 @@
Only ONE of the following options can be specified. If all 3 are
specified, they will be used in the following precedence order:
- ticks - a list of floating point numbers at which to put ticks
- minmaxtick - display DEFAULT ticks with min & max also displayed
- nticks - if ticks not specified, can automatically determine a
- number of ticks to be evenly spaced in log space
+
+ * ``ticks`` - a list of floating point numbers at which to put ticks
+ * ``minmaxtick`` - display DEFAULT ticks with min & max also displayed
+ * ``nticks`` - if ticks not specified, can automatically determine a
+ number of ticks to be evenly spaced in log space
"""
# This next call fixes some things, but is slower...
self._redraw_image()
diff -r f3ee3630f3dbb6a58deb84d3d9e27d55a7c8b451 -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -32,7 +32,7 @@
from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
- arr_ang2pix_nest
+ arr_ang2pix_nest, arr_fisheye_vectors
from yt.visualization.image_writer import write_bitmap
from yt.data_objects.data_containers import data_object_registry
from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -703,7 +703,7 @@
ax = fig.add_subplot(1,1,1,projection='mollweide')
implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
cb = fig.colorbar(implot, orientation='horizontal')
- cb.set_label(r"$\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+ cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
ax.xaxis.set_ticks(())
ax.yaxis.set_ticks(())
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
@@ -793,6 +793,59 @@
oc.sub_samples, oc.pf)
return (left_camera, right_camera)
+class FisheyeCamera(Camera):
+ def __init__(self, center, radius, fov, resolution,
+ transfer_function = None, fields = None,
+ sub_samples = 5, log_fields = None, volume = None,
+ pf = None, no_ghost=False):
+ ParallelAnalysisInterface.__init__(self)
+ if pf is not None: self.pf = pf
+ self.center = na.array(center, dtype='float64')
+ self.radius = radius
+ self.fov = fov
+ if iterable(resolution):
+ raise RuntimeError("Resolution must be a single int")
+ self.resolution = resolution
+ if transfer_function is None:
+ transfer_function = ProjectionTransferFunction()
+ self.transfer_function = transfer_function
+ if fields is None: fields = ["Density"]
+ self.fields = fields
+ self.sub_samples = sub_samples
+ self.log_fields = log_fields
+ if volume is None:
+ volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
+ log_fields=log_fields)
+ self.volume = volume
+
+ def snapshot(self):
+ image = na.zeros((self.resolution**2,1,3), dtype='float64', order='C')
+ # We now follow figures 4-7 of:
+ # http://paulbourke.net/miscellaneous/domefisheye/fisheye/
+ # ...but all in Cython.
+ vp = arr_fisheye_vectors(self.resolution, self.fov)
+ vp.shape = (self.resolution**2,1,3)
+ uv = na.ones(3, dtype='float64')
+ positions = na.ones((self.resolution**2, 1, 3), dtype='float64') * self.center
+ vector_plane = VectorPlane(positions, vp, self.center,
+ (0.0, 1.0, 0.0, 1.0), image, uv, uv)
+ tfp = TransferFunctionProxy(self.transfer_function)
+ tfp.ns = self.sub_samples
+ self.volume.initialize_source()
+ mylog.info("Rendering fisheye of %s^2", self.resolution)
+ pbar = get_pbar("Ray casting",
+ (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
+
+ total_cells = 0
+ for brick in self.volume.traverse(None, self.center, image):
+ brick.cast_plane(tfp, vector_plane)
+ total_cells += na.prod(brick.my_data[0].shape)
+ pbar.update(total_cells)
+ pbar.finish()
+ image.shape = (self.resolution, self.resolution, 3)
+ return image
+
+
def off_axis_projection(pf, center, normal_vector, width, resolution,
field, weight = None, volume = None, no_ghost = True):
r"""Project through a parameter file, off-axis, and return the image plane.
https://bitbucket.org/yt_analysis/yt/changeset/72bea510ee4a/
changeset: 72bea510ee4a
branch: yt
user: MatthewTurk
date: 2011-12-20 22:42:12
summary: Fixing up the way help is printed out. I believe that at this point we can
start integrating this with external scripts.
affected #: 2 files
diff -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba -r 72bea510ee4aae6cf2454f5fde0c35801a558091 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -111,18 +111,31 @@
parser.add_argument("--parallel", action="store_true", default=False,
dest = "parallel",
help = "Run in MPI-parallel mode (must be launched as an MPI task)")
+subparsers = parser.add_subparsers(title="subcommands",
+ dest='subcommands',
+ description="Valid subcommands",)
+
+def print_help(*args, **kwargs):
+ parser.print_help()
+help_parser = subparsers.add_parser("help")
+help_parser.set_defaults(func=print_help)
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
+parallel_capable = False
if not ytcfg.getboolean("yt","__command_line"):
- # Alternate:
- # opts, args = parser.parse_known_args()
- opts = parser.parse_args()
+ opts, args = parser.parse_known_args()
+ # THIS IS NOT SUCH A GOOD IDEA:
+ sys.argv = [a for a in args]
+ if opts.parallel:
+ parallel_capable = turn_on_parallelism()
-if exe_name in \
+if parallel_capable == True:
+ pass
+elif exe_name in \
["mpi4py", "embed_enzo",
"python"+sys.version[:3]+"-mpi"] \
- or opts.parallel or '_parallel' in dir(sys) \
+ or '_parallel' in dir(sys) \
or any(["ipengine" in arg for arg in sys.argv]):
parallel_capable = turn_on_parallelism()
else:
diff -r 70d12691846be87ec96edc9ee4fc1841a8b4d8ba -r 72bea510ee4aae6cf2454f5fde0c35801a558091 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -25,7 +25,7 @@
from yt.config import ytcfg
ytcfg["yt","__command_line"] = "True"
-from yt.startup_tasks import parser
+from yt.startup_tasks import parser, subparsers
from yt.mods import *
from yt.funcs import *
import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
@@ -44,9 +44,6 @@
pf = load(arg)
return pf
-subparsers = parser.add_subparsers(title="subcommands",
- description="Valid subcommands",)
-
def _add_arg(sc, arg):
if isinstance(arg, types.StringTypes):
arg = _common_options[arg].copy()
@@ -67,7 +64,8 @@
type.__init__(cls, name, b, d)
if cls.name is not None:
sc = subparsers.add_parser(cls.name,
- description = cls.description)
+ description = cls.description,
+ help = cls.description)
sc.set_defaults(func=cls.run)
for arg in cls.args:
_add_arg(sc, arg)
https://bitbucket.org/yt_analysis/yt/changeset/08b467dd9208/
changeset: 08b467dd9208
branch: yt
user: MatthewTurk
date: 2011-12-20 22:49:00
summary: Fixing an issue that prevented importing yt in scripts.
affected #: 1 file
diff -r 72bea510ee4aae6cf2454f5fde0c35801a558091 -r 08b467dd9208eed5982c9cacad675eaf6f47c9da yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -111,15 +111,6 @@
parser.add_argument("--parallel", action="store_true", default=False,
dest = "parallel",
help = "Run in MPI-parallel mode (must be launched as an MPI task)")
-subparsers = parser.add_subparsers(title="subcommands",
- dest='subcommands',
- description="Valid subcommands",)
-
-def print_help(*args, **kwargs):
- parser.print_help()
-help_parser = subparsers.add_parser("help")
-help_parser.set_defaults(func=print_help)
-
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
parallel_capable = False
@@ -129,6 +120,15 @@
sys.argv = [a for a in args]
if opts.parallel:
parallel_capable = turn_on_parallelism()
+else:
+ subparsers = parser.add_subparsers(title="subcommands",
+ dest='subcommands',
+ description="Valid subcommands",)
+ def print_help(*args, **kwargs):
+ parser.print_help()
+ help_parser = subparsers.add_parser("help", help="Print help message")
+ help_parser.set_defaults(func=print_help)
+
if parallel_capable == True:
pass
https://bitbucket.org/yt_analysis/yt/changeset/767fe70ff1c8/
changeset: 767fe70ff1c8
branch: yt
user: MatthewTurk
date: 2011-12-20 22:51:44
summary: Moving to include full set of args in sys.argv. Note that this will keep
around the leftover args in yt.startup_tasks.leftover_args.
affected #: 1 file
diff -r 08b467dd9208eed5982c9cacad675eaf6f47c9da -r 767fe70ff1c811cecac7e18b13932a94a17cc87b yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -113,11 +113,13 @@
help = "Run in MPI-parallel mode (must be launched as an MPI task)")
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
+leftover_args = []
+
parallel_capable = False
if not ytcfg.getboolean("yt","__command_line"):
- opts, args = parser.parse_known_args()
+ opts, leftover_args = parser.parse_known_args()
# THIS IS NOT SUCH A GOOD IDEA:
- sys.argv = [a for a in args]
+ #sys.argv = [a for a in leftover_args]
if opts.parallel:
parallel_capable = turn_on_parallelism()
else:
https://bitbucket.org/yt_analysis/yt/changeset/e1ba861e4f74/
changeset: e1ba861e4f74
branch: yt
user: MatthewTurk
date: 2011-12-20 23:15:08
summary: Changing leftover_args to unparsed_args and putting that in yt.mods.
affected #: 2 files
diff -r 767fe70ff1c811cecac7e18b13932a94a17cc87b -r e1ba861e4f74d3151e6f35b9aade388e51143f28 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -40,6 +40,7 @@
# operations.
import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
from yt.funcs import *
from yt.utilities.logger import ytLogger as mylog
diff -r 767fe70ff1c811cecac7e18b13932a94a17cc87b -r e1ba861e4f74d3151e6f35b9aade388e51143f28 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -113,13 +113,13 @@
help = "Run in MPI-parallel mode (must be launched as an MPI task)")
if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
-leftover_args = []
+unparsed_args = []
parallel_capable = False
if not ytcfg.getboolean("yt","__command_line"):
- opts, leftover_args = parser.parse_known_args()
+ opts, unparsed_args = parser.parse_known_args()
# THIS IS NOT SUCH A GOOD IDEA:
- #sys.argv = [a for a in leftover_args]
+ #sys.argv = [a for a in unparsed_args]
if opts.parallel:
parallel_capable = turn_on_parallelism()
else:
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list