[Yt-svn] commit/yt-doc: 5 new changesets
Bitbucket
commits-noreply at bitbucket.org
Sun Apr 3 17:27:17 PDT 2011
5 new changesets in yt-doc:
http://bitbucket.org/yt_analysis/yt-doc/changeset/513d4af349dc/
changeset: r52:513d4af349dc
user: MatthewTurk
date: 2011-03-25 06:16:23
summary: Adding a slightly modified numpydoc extension
affected #: 11 files (87.1 KB)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/README Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,4 @@
+This includes a version of the Numpy Documentation extension that has been
+slightly modified to emit extra TOC tree items.
+
+-- Matt Turk, March 25, 2011
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/__init__.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,1 @@
+from numpydoc import setup
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/comment_eater.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,158 @@
+from cStringIO import StringIO
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from compiler_unparse import unparse
+
+
+class Comment(object):
+ """ A comment block.
+ """
+ is_comment = True
+ def __init__(self, start_lineno, end_lineno, text):
+ # int : The first line number in the block. 1-indexed.
+ self.start_lineno = start_lineno
+ # int : The last line number. Inclusive!
+ self.end_lineno = end_lineno
+ # str : The text block including '#' character but not any leading spaces.
+ self.text = text
+
+ def add(self, string, start, end, line):
+ """ Add a new comment line.
+ """
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+ self.text += string
+
+ def __repr__(self):
+ return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno, self.text)
+
+
+class NonComment(object):
+ """ A non-comment block of code.
+ """
+ is_comment = False
+ def __init__(self, start_lineno, end_lineno):
+ self.start_lineno = start_lineno
+ self.end_lineno = end_lineno
+
+ def add(self, string, start, end, line):
+ """ Add lines to the block.
+ """
+ if string.strip():
+ # Only add if not entirely whitespace.
+ self.start_lineno = min(self.start_lineno, start[0])
+ self.end_lineno = max(self.end_lineno, end[0])
+
+ def __repr__(self):
+ return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+ self.end_lineno)
+
+
+class CommentBlocker(object):
+ """ Pull out contiguous comment blocks.
+ """
+ def __init__(self):
+ # Start with a dummy.
+ self.current_block = NonComment(0, 0)
+
+ # All of the blocks seen so far.
+ self.blocks = []
+
+ # The index mapping lines of code to their associated comment blocks.
+ self.index = {}
+
+ def process_file(self, file):
+ """ Process a file object.
+ """
+ for token in tokenize.generate_tokens(file.next):
+ self.process_token(*token)
+ self.make_index()
+
+ def process_token(self, kind, string, start, end, line):
+ """ Process a single token.
+ """
+ if self.current_block.is_comment:
+ if kind == tokenize.COMMENT:
+ self.current_block.add(string, start, end, line)
+ else:
+ self.new_noncomment(start[0], end[0])
+ else:
+ if kind == tokenize.COMMENT:
+ self.new_comment(string, start, end, line)
+ else:
+ self.current_block.add(string, start, end, line)
+
+ def new_noncomment(self, start_lineno, end_lineno):
+ """ We are transitioning from a comment to a noncomment.
+ """
+ block = NonComment(start_lineno, end_lineno)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def new_comment(self, string, start, end, line):
+ """ Possibly add a new comment.
+
+ Only adds a new comment if this comment is the only thing on the line.
+ Otherwise, it extends the noncomment block.
+ """
+ prefix = line[:start[1]]
+ if prefix.strip():
+ # Oops! Trailing comment, not a comment block.
+ self.current_block.add(string, start, end, line)
+ else:
+ # A comment block.
+ block = Comment(start[0], end[0], string)
+ self.blocks.append(block)
+ self.current_block = block
+
+ def make_index(self):
+ """ Make the index mapping lines of actual code to their associated
+ prefix comments.
+ """
+ for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+ if not block.is_comment:
+ self.index[block.start_lineno] = prev
+
+ def search_for_comment(self, lineno, default=None):
+ """ Find the comment block just before the given line number.
+
+ Returns None (or the specified default) if there is no such block.
+ """
+ if not self.index:
+ self.make_index()
+ block = self.index.get(lineno, None)
+ text = getattr(block, 'text', default)
+ return text
+
+
+def strip_comment_marker(text):
+ """ Strip # markers at the front of a block of comment text.
+ """
+ lines = []
+ for line in text.splitlines():
+ lines.append(line.lstrip('#'))
+ text = textwrap.dedent('\n'.join(lines))
+ return text
+
+
+def get_class_traits(klass):
+ """ Yield all of the documentation for trait definitions on a class object.
+ """
+ # FIXME: gracefully handle errors here or in the caller?
+ source = inspect.getsource(klass)
+ cb = CommentBlocker()
+ cb.process_file(StringIO(source))
+ mod_ast = compiler.parse(source)
+ class_ast = mod_ast.node.nodes[0]
+ for node in class_ast.code.nodes:
+ # FIXME: handle other kinds of assignments?
+ if isinstance(node, compiler.ast.Assign):
+ name = node.nodes[0].name
+ rhs = unparse(node.expr).strip()
+ doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+ yield name, rhs, doc
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/compiler_unparse.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,860 @@
+""" Turn compiler.ast structures back into executable python code.
+
+ The unparse method takes a compiler.ast tree and transforms it back into
+ valid python code. It is incomplete and currently only works for
+ import statements, function calls, function definitions, assignments, and
+ basic expressions.
+
+ Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+ fixme: We may want to move to using _ast trees because the compiler for
+ them is about 6 times faster than compiler.compile.
+"""
+
+import sys
+import cStringIO
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+def unparse(ast, single_line_functions=False):
+ s = cStringIO.StringIO()
+ UnparseCompilerAst(ast, s, single_line_functions)
+ return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+ 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+ """ Methods in this class recursively traverse an AST and
+ output source code for the abstract syntax; original formatting
+ is disregarded.
+ """
+
+ #########################################################################
+ # object interface.
+ #########################################################################
+
+ def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+ """ Unparser(tree, file=sys.stdout) -> None.
+
+ Print the source for tree to file.
+ """
+ self.f = file
+ self._single_func = single_line_functions
+ self._do_indent = True
+ self._indent = 0
+ self._dispatch(tree)
+ self._write("\n")
+ self.f.flush()
+
+ #########################################################################
+ # Unparser private interface.
+ #########################################################################
+
+ ### format, output, and dispatch methods ################################
+
+ def _fill(self, text = ""):
+ "Indent a piece of text, according to the current indentation level"
+ if self._do_indent:
+ self._write("\n"+" "*self._indent + text)
+ else:
+ self._write(text)
+
+ def _write(self, text):
+ "Append a piece of text to the current line."
+ self.f.write(text)
+
+ def _enter(self):
+ "Print ':', and increase the indentation."
+ self._write(": ")
+ self._indent += 1
+
+ def _leave(self):
+ "Decrease the indentation level."
+ self._indent -= 1
+
+ def _dispatch(self, tree):
+ "_dispatcher function, _dispatching tree type T to method _T."
+ if isinstance(tree, list):
+ for t in tree:
+ self._dispatch(t)
+ return
+ meth = getattr(self, "_"+tree.__class__.__name__)
+ if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+ return
+ meth(tree)
+
+
+ #########################################################################
+ # compiler.ast unparsing methods.
+ #
+ # There should be one method per concrete grammar type. They are
+ # organized in alphabetical order.
+ #########################################################################
+
+ def _Add(self, t):
+ self.__binary_op(t, '+')
+
+ def _And(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") and (")
+ self._write(")")
+
+ def _AssAttr(self, t):
+ """ Handle assigning an attribute of an object
+ """
+ self._dispatch(t.expr)
+ self._write('.'+t.attrname)
+
+ def _Assign(self, t):
+ """ Expression Assignment such as "a = 1".
+
+ This only handles assignment in expressions. Keyword assignment
+ is handled separately.
+ """
+ self._fill()
+ for target in t.nodes:
+ self._dispatch(target)
+ self._write(" = ")
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _AssName(self, t):
+ """ Name on left hand side of expression.
+
+ Treat just like a name on the right side of an expression.
+ """
+ self._Name(t)
+
+ def _AssTuple(self, t):
+ """ Tuple on left hand side of an expression.
+ """
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ def _AugAssign(self, t):
+ """ +=,-=,*=,/=,**=, etc. operations
+ """
+
+ self._fill()
+ self._dispatch(t.node)
+ self._write(' '+t.op+' ')
+ self._dispatch(t.expr)
+ if not self._do_indent:
+ self._write(';')
+
+ def _Bitand(self, t):
+ """ Bit and operation.
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" & ")
+
+ def _Bitor(self, t):
+ """ Bit or operation
+ """
+
+ for i, node in enumerate(t.nodes):
+ self._write("(")
+ self._dispatch(node)
+ self._write(")")
+ if i != len(t.nodes)-1:
+ self._write(" | ")
+
+ def _CallFunc(self, t):
+ """ Function call.
+ """
+ self._dispatch(t.node)
+ self._write("(")
+ comma = False
+ for e in t.args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._dispatch(e)
+ if t.star_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("*")
+ self._dispatch(t.star_args)
+ if t.dstar_args:
+ if comma: self._write(", ")
+ else: comma = True
+ self._write("**")
+ self._dispatch(t.dstar_args)
+ self._write(")")
+
+ def _Compare(self, t):
+ self._dispatch(t.expr)
+ for op, expr in t.ops:
+ self._write(" " + op + " ")
+ self._dispatch(expr)
+
+ def _Const(self, t):
+ """ A constant value such as an integer value, 3, or a string, "hello".
+ """
+ self._dispatch(t.value)
+
+ def _Decorators(self, t):
+ """ Handle function decorators (eg. @has_units)
+ """
+ for node in t.nodes:
+ self._dispatch(node)
+
+ def _Dict(self, t):
+ self._write("{")
+ for i, (k, v) in enumerate(t.items):
+ self._dispatch(k)
+ self._write(": ")
+ self._dispatch(v)
+ if i < len(t.items)-1:
+ self._write(", ")
+ self._write("}")
+
+ def _Discard(self, t):
+ """ Node for when return value is ignored such as in "foo(a)".
+ """
+ self._fill()
+ self._dispatch(t.expr)
+
+ def _Div(self, t):
+ self.__binary_op(t, '/')
+
+ def _Ellipsis(self, t):
+ self._write("...")
+
+ def _From(self, t):
+ """ Handle "from xyz import foo, bar as baz".
+ """
+ # fixme: Are From and ImportFrom handled differently?
+ self._fill("from ")
+ self._write(t.modname)
+ self._write(" import ")
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Function(self, t):
+ """ Handle function definitions
+ """
+ if t.decorators is not None:
+ self._fill("@")
+ self._dispatch(t.decorators)
+ self._fill("def "+t.name + "(")
+ defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+ for i, arg in enumerate(zip(t.argnames, defaults)):
+ self._write(arg[0])
+ if arg[1] is not None:
+ self._write('=')
+ self._dispatch(arg[1])
+ if i < len(t.argnames)-1:
+ self._write(', ')
+ self._write(")")
+ if self._single_func:
+ self._do_indent = False
+ self._enter()
+ self._dispatch(t.code)
+ self._leave()
+ self._do_indent = True
+
+ def _Getattr(self, t):
+ """ Handle getting an attribute of an object
+ """
+ if isinstance(t.expr, (Div, Mul, Sub, Add)):
+ self._write('(')
+ self._dispatch(t.expr)
+ self._write(')')
+ else:
+ self._dispatch(t.expr)
+
+ self._write('.'+t.attrname)
+
+ def _If(self, t):
+ self._fill()
+
+ for i, (compare,code) in enumerate(t.tests):
+ if i == 0:
+ self._write("if ")
+ else:
+ self._write("elif ")
+ self._dispatch(compare)
+ self._enter()
+ self._fill()
+ self._dispatch(code)
+ self._leave()
+ self._write("\n")
+
+ if t.else_ is not None:
+ self._write("else")
+ self._enter()
+ self._fill()
+ self._dispatch(t.else_)
+ self._leave()
+ self._write("\n")
+
+ def _IfExp(self, t):
+ self._dispatch(t.then)
+ self._write(" if ")
+ self._dispatch(t.test)
+
+ if t.else_ is not None:
+ self._write(" else (")
+ self._dispatch(t.else_)
+ self._write(")")
+
+ def _Import(self, t):
+ """ Handle "import xyz.foo".
+ """
+ self._fill("import ")
+
+ for i, (name,asname) in enumerate(t.names):
+ if i != 0:
+ self._write(", ")
+ self._write(name)
+ if asname is not None:
+ self._write(" as "+asname)
+
+ def _Keyword(self, t):
+ """ Keyword value assignment within function calls and definitions.
+ """
+ self._write(t.name)
+ self._write("=")
+ self._dispatch(t.expr)
+
+ def _List(self, t):
+ self._write("[")
+ for i,node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i < len(t.nodes)-1:
+ self._write(", ")
+ self._write("]")
+
+ def _Module(self, t):
+ if t.doc is not None:
+ self._dispatch(t.doc)
+ self._dispatch(t.node)
+
+ def _Mul(self, t):
+ self.__binary_op(t, '*')
+
+ def _Name(self, t):
+ self._write(t.name)
+
+ def _NoneType(self, t):
+ self._write("None")
+
+ def _Not(self, t):
+ self._write('not (')
+ self._dispatch(t.expr)
+ self._write(')')
+
+ def _Or(self, t):
+ self._write(" (")
+ for i, node in enumerate(t.nodes):
+ self._dispatch(node)
+ if i != len(t.nodes)-1:
+ self._write(") or (")
+ self._write(")")
+
+ def _Pass(self, t):
+ self._write("pass\n")
+
+ def _Printnl(self, t):
+ self._fill("print ")
+ if t.dest:
+ self._write(">> ")
+ self._dispatch(t.dest)
+ self._write(", ")
+ comma = False
+ for node in t.nodes:
+ if comma: self._write(', ')
+ else: comma = True
+ self._dispatch(node)
+
+ def _Power(self, t):
+ self.__binary_op(t, '**')
+
+ def _Return(self, t):
+ self._fill("return ")
+ if t.value:
+ if isinstance(t.value, Tuple):
+ text = ', '.join([ name.name for name in t.value.asList() ])
+ self._write(text)
+ else:
+ self._dispatch(t.value)
+ if not self._do_indent:
+ self._write('; ')
+
+ def _Slice(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ if t.lower:
+ self._dispatch(t.lower)
+ self._write(":")
+ if t.upper:
+ self._dispatch(t.upper)
+ #if t.step:
+ # self._write(":")
+ # self._dispatch(t.step)
+ self._write("]")
+
+ def _Sliceobj(self, t):
+ for i, node in enumerate(t.nodes):
+ if i != 0:
+ self._write(":")
+ if not (isinstance(node, Const) and node.value is None):
+ self._dispatch(node)
+
+ def _Stmt(self, tree):
+ for node in tree.nodes:
+ self._dispatch(node)
+
+ def _Sub(self, t):
+ self.__binary_op(t, '-')
+
+ def _Subscript(self, t):
+ self._dispatch(t.expr)
+ self._write("[")
+ for i, value in enumerate(t.subs):
+ if i != 0:
+ self._write(",")
+ self._dispatch(value)
+ self._write("]")
+
+ def _TryExcept(self, t):
+ self._fill("try")
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+
+ for handler in t.handlers:
+ self._fill('except ')
+ self._dispatch(handler[0])
+ if handler[1] is not None:
+ self._write(', ')
+ self._dispatch(handler[1])
+ self._enter()
+ self._dispatch(handler[2])
+ self._leave()
+
+ if t.else_:
+ self._fill("else")
+ self._enter()
+ self._dispatch(t.else_)
+ self._leave()
+
+ def _Tuple(self, t):
+
+ if not t.nodes:
+ # Empty tuple.
+ self._write("()")
+ else:
+ self._write("(")
+
+ # _write each elements, separated by a comma.
+ for element in t.nodes[:-1]:
+ self._dispatch(element)
+ self._write(", ")
+
+ # Handle the last one without writing comma
+ last_element = t.nodes[-1]
+ self._dispatch(last_element)
+
+ self._write(")")
+
+ def _UnaryAdd(self, t):
+ self._write("+")
+ self._dispatch(t.expr)
+
+ def _UnarySub(self, t):
+ self._write("-")
+ self._dispatch(t.expr)
+
+ def _With(self, t):
+ self._fill('with ')
+ self._dispatch(t.expr)
+ if t.vars:
+ self._write(' as ')
+ self._dispatch(t.vars.name)
+ self._enter()
+ self._dispatch(t.body)
+ self._leave()
+ self._write('\n')
+
+ def _int(self, t):
+ self._write(repr(t))
+
+ def __binary_op(self, t, symbol):
+ # Check if parenthesis are needed on left side and then dispatch
+ has_paren = False
+ left_class = str(t.left.__class__)
+ if (left_class in op_precedence.keys() and
+ op_precedence[left_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.left)
+ if has_paren:
+ self._write(')')
+ # Write the appropriate symbol for operator
+ self._write(symbol)
+ # Check if parenthesis are needed on the right side and then dispatch
+ has_paren = False
+ right_class = str(t.right.__class__)
+ if (right_class in op_precedence.keys() and
+ op_precedence[right_class] < op_precedence[str(t.__class__)]):
+ has_paren = True
+ if has_paren:
+ self._write('(')
+ self._dispatch(t.right)
+ if has_paren:
+ self._write(')')
+
+ def _float(self, t):
+ # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+ # We prefer str here.
+ self._write(str(t))
+
+ def _str(self, t):
+ self._write(repr(t))
+
+ def _tuple(self, t):
+ self._write(str(t))
+
+ #########################################################################
+ # These are the methods from the _ast modules unparse.
+ #
+ # As our needs to handle more advanced code increase, we may want to
+ # modify some of the methods below so that they work for compiler.ast.
+ #########################################################################
+
+# # stmt
+# def _Expr(self, tree):
+# self._fill()
+# self._dispatch(tree.value)
+#
+# def _Import(self, t):
+# self._fill("import ")
+# first = True
+# for a in t.names:
+# if first:
+# first = False
+# else:
+# self._write(", ")
+# self._write(a.name)
+# if a.asname:
+# self._write(" as "+a.asname)
+#
+## def _ImportFrom(self, t):
+## self._fill("from ")
+## self._write(t.module)
+## self._write(" import ")
+## for i, a in enumerate(t.names):
+## if i == 0:
+## self._write(", ")
+## self._write(a.name)
+## if a.asname:
+## self._write(" as "+a.asname)
+## # XXX(jpe) what is level for?
+##
+#
+# def _Break(self, t):
+# self._fill("break")
+#
+# def _Continue(self, t):
+# self._fill("continue")
+#
+# def _Delete(self, t):
+# self._fill("del ")
+# self._dispatch(t.targets)
+#
+# def _Assert(self, t):
+# self._fill("assert ")
+# self._dispatch(t.test)
+# if t.msg:
+# self._write(", ")
+# self._dispatch(t.msg)
+#
+# def _Exec(self, t):
+# self._fill("exec ")
+# self._dispatch(t.body)
+# if t.globals:
+# self._write(" in ")
+# self._dispatch(t.globals)
+# if t.locals:
+# self._write(", ")
+# self._dispatch(t.locals)
+#
+# def _Print(self, t):
+# self._fill("print ")
+# do_comma = False
+# if t.dest:
+# self._write(">>")
+# self._dispatch(t.dest)
+# do_comma = True
+# for e in t.values:
+# if do_comma:self._write(", ")
+# else:do_comma=True
+# self._dispatch(e)
+# if not t.nl:
+# self._write(",")
+#
+# def _Global(self, t):
+# self._fill("global")
+# for i, n in enumerate(t.names):
+# if i != 0:
+# self._write(",")
+# self._write(" " + n)
+#
+# def _Yield(self, t):
+# self._fill("yield")
+# if t.value:
+# self._write(" (")
+# self._dispatch(t.value)
+# self._write(")")
+#
+# def _Raise(self, t):
+# self._fill('raise ')
+# if t.type:
+# self._dispatch(t.type)
+# if t.inst:
+# self._write(", ")
+# self._dispatch(t.inst)
+# if t.tback:
+# self._write(", ")
+# self._dispatch(t.tback)
+#
+#
+# def _TryFinally(self, t):
+# self._fill("try")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# self._fill("finally")
+# self._enter()
+# self._dispatch(t.finalbody)
+# self._leave()
+#
+# def _excepthandler(self, t):
+# self._fill("except ")
+# if t.type:
+# self._dispatch(t.type)
+# if t.name:
+# self._write(", ")
+# self._dispatch(t.name)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _ClassDef(self, t):
+# self._write("\n")
+# self._fill("class "+t.name)
+# if t.bases:
+# self._write("(")
+# for a in t.bases:
+# self._dispatch(a)
+# self._write(", ")
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _FunctionDef(self, t):
+# self._write("\n")
+# for deco in t.decorators:
+# self._fill("@")
+# self._dispatch(deco)
+# self._fill("def "+t.name + "(")
+# self._dispatch(t.args)
+# self._write(")")
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+#
+# def _For(self, t):
+# self._fill("for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# def _While(self, t):
+# self._fill("while ")
+# self._dispatch(t.test)
+# self._enter()
+# self._dispatch(t.body)
+# self._leave()
+# if t.orelse:
+# self._fill("else")
+# self._enter()
+# self._dispatch(t.orelse)
+# self._leave
+#
+# # expr
+# def _Str(self, tree):
+# self._write(repr(tree.s))
+##
+# def _Repr(self, t):
+# self._write("`")
+# self._dispatch(t.value)
+# self._write("`")
+#
+# def _Num(self, t):
+# self._write(repr(t.n))
+#
+# def _ListComp(self, t):
+# self._write("[")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write("]")
+#
+# def _GeneratorExp(self, t):
+# self._write("(")
+# self._dispatch(t.elt)
+# for gen in t.generators:
+# self._dispatch(gen)
+# self._write(")")
+#
+# def _comprehension(self, t):
+# self._write(" for ")
+# self._dispatch(t.target)
+# self._write(" in ")
+# self._dispatch(t.iter)
+# for if_clause in t.ifs:
+# self._write(" if ")
+# self._dispatch(if_clause)
+#
+# def _IfExp(self, t):
+# self._dispatch(t.body)
+# self._write(" if ")
+# self._dispatch(t.test)
+# if t.orelse:
+# self._write(" else ")
+# self._dispatch(t.orelse)
+#
+# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+# def _UnaryOp(self, t):
+# self._write(self.unop[t.op.__class__.__name__])
+# self._write("(")
+# self._dispatch(t.operand)
+# self._write(")")
+#
+# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+# "FloorDiv":"//", "Pow": "**"}
+# def _BinOp(self, t):
+# self._write("(")
+# self._dispatch(t.left)
+# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+# self._dispatch(t.right)
+# self._write(")")
+#
+# boolops = {_ast.And: 'and', _ast.Or: 'or'}
+# def _BoolOp(self, t):
+# self._write("(")
+# self._dispatch(t.values[0])
+# for v in t.values[1:]:
+# self._write(" %s " % self.boolops[t.op.__class__])
+# self._dispatch(v)
+# self._write(")")
+#
+# def _Attribute(self,t):
+# self._dispatch(t.value)
+# self._write(".")
+# self._write(t.attr)
+#
+## def _Call(self, t):
+## self._dispatch(t.func)
+## self._write("(")
+## comma = False
+## for e in t.args:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## for e in t.keywords:
+## if comma: self._write(", ")
+## else: comma = True
+## self._dispatch(e)
+## if t.starargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("*")
+## self._dispatch(t.starargs)
+## if t.kwargs:
+## if comma: self._write(", ")
+## else: comma = True
+## self._write("**")
+## self._dispatch(t.kwargs)
+## self._write(")")
+#
+# # slice
+# def _Index(self, t):
+# self._dispatch(t.value)
+#
+# def _ExtSlice(self, t):
+# for i, d in enumerate(t.dims):
+# if i != 0:
+# self._write(': ')
+# self._dispatch(d)
+#
+# # others
+# def _arguments(self, t):
+# first = True
+# nonDef = len(t.args)-len(t.defaults)
+# for a in t.args[0:nonDef]:
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a)
+# for a,d in zip(t.args[nonDef:], t.defaults):
+# if first:first = False
+# else: self._write(", ")
+# self._dispatch(a),
+# self._write("=")
+# self._dispatch(d)
+# if t.vararg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("*"+t.vararg)
+# if t.kwarg:
+# if first:first = False
+# else: self._write(", ")
+# self._write("**"+t.kwarg)
+#
+## def _keyword(self, t):
+## self._write(t.arg)
+## self._write("=")
+## self._dispatch(t.value)
+#
+# def _Lambda(self, t):
+# self._write("lambda ")
+# self._dispatch(t.args)
+# self._write(": ")
+# self._dispatch(t.body)
+
+
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/docscrape.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,500 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+ """A line-based string reader.
+
+ """
+ def __init__(self, data):
+ """
+ Parameters
+ ----------
+ data : str
+ String with lines separated by '\n'.
+
+ """
+ if isinstance(data,list):
+ self._str = data
+ else:
+ self._str = data.split('\n') # store string as list of lines
+
+ self.reset()
+
+ def __getitem__(self, n):
+ return self._str[n]
+
+ def reset(self):
+ self._l = 0 # current line nr
+
+ def read(self):
+ if not self.eof():
+ out = self[self._l]
+ self._l += 1
+ return out
+ else:
+ return ''
+
+ def seek_next_non_empty_line(self):
+ for l in self[self._l:]:
+ if l.strip():
+ break
+ else:
+ self._l += 1
+
+ def eof(self):
+ return self._l >= len(self._str)
+
+ def read_to_condition(self, condition_func):
+ start = self._l
+ for line in self[start:]:
+ if condition_func(line):
+ return self[start:self._l]
+ self._l += 1
+ if self.eof():
+ return self[start:self._l+1]
+ return []
+
+ def read_to_next_empty_line(self):
+ self.seek_next_non_empty_line()
+ def is_empty(line):
+ return not line.strip()
+ return self.read_to_condition(is_empty)
+
+ def read_to_next_unindented_line(self):
+ def is_unindented(line):
+ return (line.strip() and (len(line.lstrip()) == len(line)))
+ return self.read_to_condition(is_unindented)
+
+ def peek(self,n=0):
+ if self._l + n < len(self._str):
+ return self[self._l + n]
+ else:
+ return ''
+
+ def is_empty(self):
+ return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+ def __init__(self, docstring, config={}):
+ docstring = textwrap.dedent(docstring).split('\n')
+
+ self._doc = Reader(docstring)
+ self._parsed_data = {
+ 'Signature': '',
+ 'Summary': [''],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Attributes': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'Warnings': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {}
+ }
+
+ self._parse()
+
+ def __getitem__(self,key):
+ return self._parsed_data[key]
+
+ def __setitem__(self,key,val):
+ if not self._parsed_data.has_key(key):
+ warn("Unknown section %s" % key)
+ else:
+ self._parsed_data[key] = val
+
+ def _is_at_section(self):
+ self._doc.seek_next_non_empty_line()
+
+ if self._doc.eof():
+ return False
+
+ l1 = self._doc.peek().strip() # e.g. Parameters
+
+ if l1.startswith('.. index::'):
+ return True
+
+ l2 = self._doc.peek(1).strip() # ---------- or ==========
+ return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+ def _strip(self,doc):
+ i = 0
+ j = 0
+ for i,line in enumerate(doc):
+ if line.strip(): break
+
+ for j,line in enumerate(doc[::-1]):
+ if line.strip(): break
+
+ return doc[i:len(doc)-j]
+
+ def _read_to_next_section(self):
+ section = self._doc.read_to_next_empty_line()
+
+ while not self._is_at_section() and not self._doc.eof():
+ if not self._doc.peek(-1).strip(): # previous line was empty
+ section += ['']
+
+ section += self._doc.read_to_next_empty_line()
+
+ return section
+
+ def _read_sections(self):
+ while not self._doc.eof():
+ data = self._read_to_next_section()
+ name = data[0].strip()
+
+ if name.startswith('..'): # index section
+ yield name, data[1:]
+ elif len(data) < 2:
+ yield StopIteration
+ else:
+ yield name, self._strip(data[2:])
+
+ def _parse_param_list(self,content):
+ r = Reader(content)
+ params = []
+ while not r.eof():
+ header = r.read().strip()
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ')[:2]
+ else:
+ arg_name, arg_type = header, ''
+
+ desc = r.read_to_next_unindented_line()
+ desc = dedent_lines(desc)
+
+ params.append((arg_name,arg_type,desc))
+
+ return params
+
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+ def _parse_see_also(self, content):
+ """
+ func_name : Descriptive text
+ continued text
+ another_func_name : Descriptive text
+ func_name1, func_name2, :meth:`func_name`, func_name3
+
+ """
+ items = []
+
+ def parse_item_name(text):
+ """Match ':role:`name`' or 'name'"""
+ m = self._name_rgx.match(text)
+ if m:
+ g = m.groups()
+ if g[1] is None:
+ return g[3], None
+ else:
+ return g[2], g[1]
+ raise ValueError("%s is not a item name" % text)
+
+ def push_item(name, rest):
+ if not name:
+ return
+ name, role = parse_item_name(name)
+ items.append((name, list(rest), role))
+ del rest[:]
+
+ current_func = None
+ rest = []
+
+ for line in content:
+ if not line.strip(): continue
+
+ m = self._name_rgx.match(line)
+ if m and line[m.end():].strip().startswith(':'):
+ push_item(current_func, rest)
+ current_func, line = line[:m.end()], line[m.end():]
+ rest = [line.split(':', 1)[1].strip()]
+ if not rest[0]:
+ rest = []
+ elif not line.startswith(' '):
+ push_item(current_func, rest)
+ current_func = None
+ if ',' in line:
+ for func in line.split(','):
+ if func.strip():
+ push_item(func, [])
+ elif line.strip():
+ current_func = line
+ elif current_func is not None:
+ rest.append(line.strip())
+ push_item(current_func, rest)
+ return items
+
+ def _parse_index(self, section, content):
+ """
+ .. index: default
+ :refguide: something, else, and more
+
+ """
+ def strip_each_in(lst):
+ return [s.strip() for s in lst]
+
+ out = {}
+ section = section.split('::')
+ if len(section) > 1:
+ out['default'] = strip_each_in(section[1].split(','))[0]
+ for line in content:
+ line = line.split(':')
+ if len(line) > 2:
+ out[line[1]] = strip_each_in(line[2].split(','))
+ return out
+
+ def _parse_summary(self):
+ """Grab signature (if given) and summary"""
+ if self._is_at_section():
+ return
+
+ summary = self._doc.read_to_next_empty_line()
+ summary_str = " ".join([s.strip() for s in summary]).strip()
+ if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+ self['Signature'] = summary_str
+ if not self._is_at_section():
+ self['Summary'] = self._doc.read_to_next_empty_line()
+ else:
+ self['Summary'] = summary
+
+ if not self._is_at_section():
+ self['Extended Summary'] = self._read_to_next_section()
+
    def _parse(self):
        """Parse the docstring into the section dictionary."""
        self._doc.reset()
        self._parse_summary()

        for (section,content) in self._read_sections():
            if not section.startswith('..'):
                # Normalize headers: "see also" -> "See Also".
                section = ' '.join([s.capitalize() for s in section.split(' ')])
            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
                           'Other Parameters', 'Attributes', 'Methods'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                # Free-form sections (Notes, Examples, ...) keep raw lines.
                self[section] = content
+
+ # string conversion routines
+
+ def _str_header(self, name, symbol='-'):
+ return [name, len(name)*symbol]
+
+ def _str_indent(self, doc, indent=4):
+ out = []
+ for line in doc:
+ out += [' '*indent + line]
+ return out
+
+ def _str_signature(self):
+ if self['Signature']:
+ return [self['Signature'].replace('*','\*')] + ['']
+ else:
+ return ['']
+
+ def _str_summary(self):
+ if self['Summary']:
+ return self['Summary'] + ['']
+ else:
+ return []
+
+ def _str_extended_summary(self):
+ if self['Extended Summary']:
+ return self['Extended Summary'] + ['']
+ else:
+ return []
+
+ def _str_param_list(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ for param,param_type,desc in self[name]:
+ out += ['%s : %s' % (param, param_type)]
+ out += self._str_indent(desc)
+ out += ['']
+ return out
+
+ def _str_section(self, name):
+ out = []
+ if self[name]:
+ out += self._str_header(name)
+ out += self[name]
+ out += ['']
+ return out
+
    def _str_see_also(self, func_role):
        """Render the See Also section as cross-reference links."""
        if not self['See Also']: return []
        out = []
        out += self._str_header("See Also")
        # Entries without a description are run together on one line,
        # comma-separated; last_had_desc tracks whether the previous entry
        # opened its own paragraph.
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                # Explicit role from the docstring, e.g. :meth:`x`.
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                # No role available: plain reST reference.
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # Append to the previous links line.
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out
+
    def _str_index(self):
        """Render the parsed ``.. index::`` data back into reST lines."""
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default','')]
        # Python 2 dict iteration: role order is arbitrary.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            out += ['   :%s: %s' % (section, ', '.join(references))]
        return out
+
    def __str__(self, func_role=''):
        """Assemble the full plain-reST rendering of the docstring."""
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes','References','Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)
+
+
def indent(str,indent=4):
    """Indent every line of *str* by *indent* spaces.

    A ``None`` input yields just the indent string itself.
    """
    indent_str = ' ' * indent
    if str is None:
        return indent_str
    return '\n'.join(indent_str + line for line in str.split('\n'))
+
def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
+
def header(text, style='-'):
    """Return *text* underlined with *style*, each line newline-terminated."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
+
+
class FunctionDoc(NumpyDocString):
    """Parsed docstring of a function or method.

    When the docstring has no Signature section, one is recovered by
    introspecting the callable itself.
    """
    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role   # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc)

        if not self['Signature'] and func is not None:
            func, func_name = self.get_func()
            try:
                # try to read signature
                argspec = inspect.getargspec(func)
                argspec = inspect.formatargspec(*argspec)
                argspec = argspec.replace('*','\*')
                signature = '%s%s' % (func_name, argspec)
            except TypeError, e:
                # Builtins / C extensions are not introspectable.
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return (callable, name); classes unwrap to __call__/__init__."""
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''

        func, func_name = self.get_func()
        signature = self['Signature'].replace('*', '\*')

        # Map docstring role to the Sphinx directive name.
        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if not roles.has_key(self._role):
                print "Warning: invalid role %s" % self._role
            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
                                             func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
+
+
class ClassDoc(NumpyDocString):
    """Parsed docstring of a class.

    Can auto-populate empty Methods/Attributes sections from the class's
    public members.
    """
    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        # Normalize so self._mod is '' or always ends with a dot.
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        if config.get('show_class_members', True):
            # Fill in empty member sections via introspection.
            if not self['Methods']:
                self['Methods'] = [(name, '', '')
                                   for name in sorted(self.methods)]
            if not self['Attributes']:
                self['Attributes'] = [(name, '', '')
                                      for name in sorted(self.properties)]

    @property
    def methods(self):
        """Names of public callable members of the class."""
        if self._cls is None:
            return []
        return [name for name,func in inspect.getmembers(self._cls)
                if not name.startswith('_') and callable(func)]

    @property
    def properties(self):
        """Names of public members whose value is None (heuristic for
        data attributes)."""
        if self._cls is None:
            return []
        return [name for name,func in inspect.getmembers(self._cls)
                if not name.startswith('_') and func is None]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/docscrape_sphinx.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,227 @@
+import re, inspect, textwrap, pydoc
+import sphinx
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
class SphinxDocString(NumpyDocString):
    """NumpyDocString that renders Sphinx-flavoured reST: field lists,
    rubrics, ``.. seealso::`` / ``.. warning::`` directives, and optional
    ``.. plot::`` wrapping of matplotlib examples."""

    def __init__(self, docstring, config={}):
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # Sphinx output uses rubrics instead of underlined headers.
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # NOTE(review): the early return disables signature output; the
        # code below it is unreachable. Presumably deliberate (Sphinx
        # renders the signature itself) -- confirm before removing.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Parameter sections become reST field lists with bold names.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The wrapped object, set by the Function/Class subclasses;
        # None when only a raw docstring was supplied.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.

        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            # NOTE(review): autosummary emission is disabled ('if 0') in
            # this modified copy; members with attributes fall through
            # silently and only 'others' are listed below.
            if 0:#autosum:
                out += ['.. autosummary::', '   :toctree:', '']
                out += autosum

            if others:
                # Fixed-width reST table sized to the longest entries.
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Drop the plain-text header (first two lines) and indent the
            # rest under the directive.
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # When use_plots is enabled and the example imports matplotlib
        # (but isn't already inside a plot::), wrap it so the figure gets
        # built into the docs.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
+
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    # Function docstrings rendered with the Sphinx string conversions.
    def __init__(self, obj, doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
+
class SphinxClassDoc(SphinxDocString, ClassDoc):
    # Class docstrings rendered with the Sphinx string conversions.
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the func_doc argument is accepted but ignored --
        # None is always passed down. Confirm whether this is intentional.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
+
class SphinxObjDoc(SphinxDocString):
    # Wrapper for arbitrary objects whose docstring is supplied directly.
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
+
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx*Doc wrapper for *obj*.

    *what* ('class'/'module'/'function'/'object') is inferred from *obj*
    when not given; *doc* overrides the object's own docstring.
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/numpydoc.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,164 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format. [1]
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+from sphinx.util.compat import Directive
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+ reference_offset=[0]):
+
+ cfg = dict(use_plots=app.config.numpydoc_use_plots,
+ show_class_members=app.config.numpydoc_show_class_members)
+
+ if what == 'module':
+ # Strip top title
+ title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+ re.I|re.S)
+ lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
+ else:
+ doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
+ lines[:] = unicode(doc).split(u"\n")
+
+ if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+ obj.__name__:
+ if hasattr(obj, '__module__'):
+ v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
+ else:
+ v = dict(full_name=obj.__name__)
+ lines += [u'', u'.. htmlonly::', '']
+ lines += [u' %s' % x for x in
+ (app.config.numpydoc_edit_link % v).split("\n")]
+
+ # replace reference numbers so that there are no duplicates
+ references = []
+ for line in lines:
+ line = line.strip()
+ m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
+ if m:
+ references.append(m.group(1))
+
+ # start renaming from the longest string, to avoid overwriting parts
+ references.sort(key=lambda x: -len(x))
+ if references:
+ for i, line in enumerate(lines):
+ for r in references:
+ if re.match(ur'^\d+$', r):
+ new_r = u"R%d" % (reference_offset[0] + int(r))
+ else:
+ new_r = u"%s%d" % (r, reference_offset[0])
+ lines[i] = lines[i].replace(u'[%s]_' % r,
+ u'[%s]_' % new_r)
+ lines[i] = lines[i].replace(u'.. [%s]' % r,
+ u'.. [%s]' % new_r)
+
+ reference_offset[0] += len(references)
+
def mangle_signature(app, what, name, obj, options, sig, retann):
    """autodoc-process-signature hook: pull a signature out of the
    docstring when the object itself cannot be introspected."""
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
        (not hasattr(obj, '__init__') or
         'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''

    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
    if not hasattr(obj, '__doc__'): return

    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        # Keep only the "(...)" part, dropping any leading name/module.
        sig = re.sub(u"^[^(]*", u"", doc['Signature'])
        return sig, u''
+
def setup(app, get_doc_object_=get_doc_object):
    """Sphinx entry point: hook docstring/signature mangling and register
    the extra mangling domains and config values."""
    # Allow callers to substitute a different doc-object factory.
    global get_doc_object
    get_doc_object = get_doc_object_

    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)

    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
+
+#------------------------------------------------------------------------------
+# Docstring-mangling domains
+#------------------------------------------------------------------------------
+
+from docutils.statemachine import ViewList
+from sphinx.domains.c import CDomain
+from sphinx.domains.python import PythonDomain
+
class ManglingDomainBase(object):
    """Mixin that wraps a Sphinx domain's directives so their content is
    run through numpydoc's docstring mangling first."""
    # Maps directive name -> objtype passed to mangle_docstrings.
    directive_mangling_map = {}

    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()

    def wrap_mangling_directives(self):
        # Replace each mapped directive with its mangling wrapper.
        for name, objtype in self.directive_mangling_map.items():
            self.directives[name] = wrap_mangling_directive(
                self.directives[name], objtype)
+
class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
    """Python domain ('np:') whose directives mangle numpydoc content."""
    name = 'np'
    directive_mangling_map = {
        'function': 'function',
        'class': 'class',
        'exception': 'class',
        'method': 'function',
        'classmethod': 'function',
        'staticmethod': 'function',
        'attribute': 'attribute',
    }
+
class NumpyCDomain(ManglingDomainBase, CDomain):
    """C domain ('np-c:') whose directives mangle numpydoc content."""
    name = 'np-c'
    directive_mangling_map = {
        'function': 'function',
        'member': 'attribute',
        'macro': 'function',
        'type': 'class',
        'var': 'object',
    }
+
def wrap_mangling_directive(base_directive, objtype):
    """Return a subclass of *base_directive* that mangles its content
    (as a docstring of kind *objtype*) before delegating to the base."""
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env

            name = None
            if self.arguments:
                # Strip any module prefix and trailing "(...)" signature.
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()

            if not name:
                name = self.arguments[0]

            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)

            return base_directive.run(self)

    return directive
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/phantom_import.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,162 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions to use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+import imp, sys, compiler, types, os, inspect, re
+
def setup(app):
    """Sphinx entry point: load phantom modules once the builder starts."""
    app.connect('builder-inited', initialize)
    app.add_config_value('phantom_import_file', None, True)
+
def initialize(app):
    # Import the phantom XML file, if one is configured and exists.
    fn = app.config.phantom_import_file
    if (fn and os.path.isfile(fn)):
        print "[numpydoc] Phantom importing modules from", fn, "..."
        import_phantom_module(fn)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
def import_phantom_module(xml_file):
    """
    Insert a fake Python module to sys.modules, based on a XML file.

    The XML file is expected to conform to Pydocweb DTD. The fake
    module will contain dummy objects, which guarantee the following:

    - Docstrings are correct.
    - Class inheritance relationships are correct (if present in XML).
    - Function argspec is *NOT* correct (even if present in XML).
      Instead, the function signature is prepended to the function docstring.
    - Class attributes are *NOT* correct; instead, they are dummy objects.

    Parameters
    ----------
    xml_file : str
        Name of an XML file to read

    """
    import lxml.etree as etree

    # Maps dotted id -> created dummy object/module.
    object_cache = {}

    tree = etree.parse(xml_file)
    root = tree.getroot()

    # Sort items so that
    # - Base classes come before classes inherited from them
    # - Modules come before their contents
    all_nodes = dict([(n.attrib['id'], n) for n in root])

    def _get_bases(node, recurse=False):
        # Collect base-class ids; with recurse=True, walk transitively.
        bases = [x.attrib['ref'] for x in node.findall('base')]
        if recurse:
            j = 0
            while True:
                try:
                    b = bases[j]
                except IndexError: break
                if b in all_nodes:
                    bases.extend(_get_bases(all_nodes[b]))
                j += 1
        return bases

    type_index = ['module', 'class', 'callable', 'object']

    def base_cmp(a, b):
        # Order: by kind, then base classes before subclasses, then by
        # nesting depth (number of dots in the dotted id).
        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
        if x != 0: return x

        if a.tag == 'class' and b.tag == 'class':
            a_bases = _get_bases(a, recurse=True)
            b_bases = _get_bases(b, recurse=True)
            x = cmp(len(a_bases), len(b_bases))
            if x != 0: return x
            if a.attrib['id'] in b_bases: return -1
            if b.attrib['id'] in a_bases: return 1

        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))

    nodes = root.getchildren()
    nodes.sort(base_cmp)

    # Create phantom items
    for node in nodes:
        name = node.attrib['id']
        doc = (node.text or '').decode('string-escape') + "\n"
        if doc == "\n": doc = ""

        # create parent, if missing
        parent = name
        while True:
            parent = '.'.join(parent.split('.')[:-1])
            if not parent: break
            if parent in object_cache: break
            obj = imp.new_module(parent)
            object_cache[parent] = obj
            sys.modules[parent] = obj

        # create object
        if node.tag == 'module':
            obj = imp.new_module(name)
            obj.__doc__ = doc
            sys.modules[name] = obj
        elif node.tag == 'class':
            bases = [object_cache[b] for b in _get_bases(node)
                     if b in object_cache]
            bases.append(object)
            init = lambda self: None
            init.__doc__ = doc
            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
            obj.__name__ = name.split('.')[-1]
        elif node.tag == 'callable':
            funcname = node.attrib['id'].split('.')[-1]
            argspec = node.attrib.get('argspec')
            if argspec:
                argspec = re.sub('^[^(]*', '', argspec)
                # The dummy's own argspec is meaningless, so prepend the
                # real signature to the docstring instead.
                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
            obj = lambda: 0
            obj.__argspec_is_invalid_ = True
            obj.func_name = funcname
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__objclass__ = object_cache[parent]
        else:
            # Plain attribute: a dummy object carrying only the docstring.
            class Dummy(object): pass
            obj = Dummy()
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__get__ = lambda: None
        object_cache[name] = obj

        if parent:
            if inspect.ismodule(object_cache[parent]):
                obj.__module__ = parent
                setattr(object_cache[parent], name.split('.')[-1], obj)

    # Populate items
    for node in root:
        obj = object_cache.get(node.attrib['id'])
        if obj is None: continue
        for ref in node.findall('ref'):
            if node.tag == 'class':
                # Only attach members that actually belong to this class.
                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
                    setattr(obj, ref.attrib['name'],
                            object_cache.get(ref.attrib['ref']))
            else:
                setattr(obj, ref.attrib['name'],
                        object_cache.get(ref.attrib['ref']))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/plot_directive.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,619 @@
+"""
+A special directive for generating a matplotlib plot.
+
+.. warning::
+
+ This is a hacked version of plot_directive.py from Matplotlib.
+ It's very much subject to change!
+
+
+Usage
+-----
+
+Can be used like this::
+
+ .. plot:: examples/example.py
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+ plt.plot([1,2,3], [4,5,6])
+
+ .. plot::
+
+ A plotting example:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot([1,2,3], [4,5,6])
+
+The content is interpreted as doctest formatted if it has a line starting
+with ``>>>``.
+
+The ``plot`` directive supports the options
+
+ format : {'python', 'doctest'}
+ Specify the format of the input
+
+ include-source : bool
+ Whether to display the source code. Default can be changed in conf.py
+
+and the ``image`` directive options ``alt``, ``height``, ``width``,
+``scale``, ``align``, ``class``.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+ plot_include_source
+ Default value for the include-source option
+
+ plot_pre_code
+ Code that should be executed before each plot.
+
+ plot_basedir
+ Base directory, to which plot:: file names are relative to.
+ (If None or empty, file names are relative to the directory where
+ the file containing the directive is.)
+
+ plot_formats
+ File formats to generate. List of tuples or strings::
+
+ [(suffix, dpi), suffix, ...]
+
+ that determine the file format and the DPI. For entries whose
+ DPI was omitted, sensible defaults are chosen.
+
+ plot_html_show_formats
+ Whether to show links to the files in HTML.
+
+TODO
+----
+
+* Refactor Latex output; now it's plain images, but it would be nice
+ to make them appear side-by-side, or in floats.
+
+"""
+
+import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
+import sphinx
+
+import warnings
+warnings.warn("A plot_directive module is also available under "
+ "matplotlib.sphinxext; expect this numpydoc.plot_directive "
+ "module to be deprecated after relevant features have been "
+ "integrated there.",
+ FutureWarning, stacklevel=2)
+
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
def setup(app):
    """Sphinx entry point: register the plot directive and its config."""
    # Stash the app/config on the function object for later use by run().
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    app.add_config_value('plot_pre_code', '', True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)

    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
+
+#------------------------------------------------------------------------------
+# plot:: directive
+#------------------------------------------------------------------------------
+from docutils.parsers.rst import directives
+from docutils import nodes
+
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Old-style (function) directive entry point; all work happens in run().
    return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
+
def _option_boolean(arg):
    """Parse a boolean directive option; a bare flag counts as True."""
    if not arg or not arg.strip():
        # no argument given, assume used as a flag
        return True
    value = arg.strip().lower()
    if value in ('no', '0', 'false'):
        return False
    if value in ('yes', '1', 'true'):
        return True
    raise ValueError('"%s" unknown boolean' % arg)
+
def _option_format(arg):
    """Validate the :format: option of the plot directive.

    BUG FIX: the accepted values were ('python', 'lisp'), but the module
    docs and run() define them as {'python', 'doctest'} -- previously
    ':format: doctest' (the documented value) was rejected.
    """
    return directives.choice(arg, ('python', 'doctest'))
+
def _option_align(arg):
    # Restrict :align: to the values the image/figure directives accept.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
+
# Options accepted by the plot:: directive: the standard image options
# plus include-source and format.
plot_directive_options = {'alt': directives.unchanged,
                          'height': directives.length_or_unitless,
                          'width': directives.length_or_percentage_or_unitless,
                          'scale': directives.nonnegative_int,
                          'align': _option_align,
                          'class': directives.class_option,
                          'include-source': _option_boolean,
                          'format': _option_format,
                          }
+
+#------------------------------------------------------------------------------
+# Generating output
+#------------------------------------------------------------------------------
+
+from docutils import nodes, utils
+
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        return jinja2.Template(template).render(**kw)
except ImportError:
    # Fall back to the older Jinja 1 API.
    import jinja
    def format_template(template, **kw):
        return jinja.from_string(template, **kw)
+
+TEMPLATE = """
+{{ source_code }}
+
+{{ only_html }}
+
+ {% if source_link or (html_show_formats and not multi_image) %}
+ (
+ {%- if source_link -%}
+ `Source code <{{ source_link }}>`__
+ {%- endif -%}
+ {%- if html_show_formats and not multi_image -%}
+ {%- for img in images -%}
+ {%- for fmt in img.formats -%}
+ {%- if source_link or not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ {%- endfor -%}
+ {%- endif -%}
+ )
+ {% endif %}
+
+ {% for img in images %}
+ .. figure:: {{ build_dir }}/{{ img.basename }}.png
+ {%- for option in options %}
+ {{ option }}
+ {% endfor %}
+
+ {% if html_show_formats and multi_image -%}
+ (
+ {%- for fmt in img.formats -%}
+ {%- if not loop.first -%}, {% endif -%}
+ `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
+ {%- endfor -%}
+ )
+ {%- endif -%}
+ {% endfor %}
+
+{{ only_latex }}
+
+ {% for img in images %}
+ .. image:: {{ build_dir }}/{{ img.basename }}.pdf
+ {% endfor %}
+
+"""
+
class ImageFile(object):
    """One generated image: a basename, its directory, and the list of
    file formats that were produced for it."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        self.formats = []

    def filename(self, format):
        """Full path of this image with the given *format* extension."""
        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))

    def filenames(self):
        """Full paths for every format generated so far."""
        paths = []
        for fmt in self.formats:
            paths.append(self.filename(fmt))
        return paths
+
def run(arguments, content, options, state_machine, state, lineno):
    """Core of the plot:: directive.

    Executes the plot code (from a file argument or inline content),
    renders the generated reST via TEMPLATE, splices it into the
    document, and copies the image files into the builder's output.
    Returns a list of docutils system messages (errors).
    """
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config

    options.setdefault('include-source', config.plot_include_source)

    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if arguments:
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        code = open(source_file_name, 'r').read()
        output_base = os.path.basename(source_file_name)
    else:
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        # Number inline plots per-document so output names stay unique.
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if options.has_key('format'):
        # An explicit :format: option overrides auto-detection.
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = makefig(code, source_file_name, build_dir, output_base,
                          config)
        errors = []
    except PlotError, err:
        # Report the failure but still emit the source block below.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"

        # Only the first chunk links back to the full source file.
        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                shutil.copyfile(fn, os.path.join(dest_dir,
                                                 os.path.basename(fn)))

    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        f = open(target_name, 'w')
        f.write(unescape_doctest(code))
        f.close()

    return errors
+
+
+#------------------------------------------------------------------------------
+# Run code and capture figures
+#------------------------------------------------------------------------------
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+
+import exceptions
+
+def contains_doctest(text):
+    """Return True if *text* should be treated as a doctest: it is not
+    valid Python source as-is but contains ``>>>`` prompt lines."""
+    try:
+        # check if it's valid Python as-is
+        compile(text, '<string>', 'exec')
+        return False
+    except SyntaxError:
+        pass
+    # Not compilable: call it a doctest only if some line starts with
+    # a ``>>>`` prompt (possibly indented).
+    r = re.compile(r'^\s*>>>', re.M)
+    m = r.search(text)
+    return bool(m)
+
+def unescape_doctest(text):
+    """
+    Extract code from a piece of text, which contains either Python code
+    or doctests.
+
+    Doctest input lines (``>>>`` / ``...``) become plain code; other
+    non-blank lines (e.g. expected doctest output) are kept as ``#``
+    comments so line numbering stays roughly aligned with the input.
+    """
+    if not contains_doctest(text):
+        return text
+
+    code = ""
+    for line in text.split("\n"):
+        m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
+        if m:
+            # Keep only the statement after the doctest prompt.
+            code += m.group(2) + "\n"
+        elif line.strip():
+            # Non-prompt, non-blank line (expected output): comment out.
+            code += "# " + line.strip() + "\n"
+        else:
+            code += "\n"
+    return code
+
+def split_code_at_show(text):
+    """
+    Split code at plt.show()
+
+    Returns a list of code chunks.  Each ``plt.show()`` line (or
+    ``>>> plt.show()`` for doctests) terminates the chunk containing
+    it; a trailing chunk is kept only if it is non-blank.
+    """
+
+    parts = []
+    is_doctest = contains_doctest(text)
+
+    part = []
+    for line in text.split("\n"):
+        if (not is_doctest and line.strip() == 'plt.show()') or \
+               (is_doctest and line.strip() == '>>> plt.show()'):
+            # End the current chunk *after* the show() line.
+            part.append(line)
+            parts.append("\n".join(part))
+            part = []
+        else:
+            part.append(line)
+    if "\n".join(part).strip():
+        parts.append("\n".join(part))
+    return parts
+
+class PlotError(RuntimeError):
+    """Raised when executing example code or saving its figures fails;
+    the message carries the formatted traceback of the failure."""
+    pass
+
+def run_code(code, code_path, ns=None):
+    """
+    Execute the string *code* in namespace *ns* and return the namespace.
+
+    The working directory, sys.path, sys.argv and sys.stdout are
+    temporarily adjusted so the example runs as if it were a script in
+    its own directory; all four are restored afterwards.  Any exception
+    (including SystemExit) is re-raised as PlotError carrying the
+    formatted traceback.
+    """
+    # Change the working directory to the directory of the example, so
+    # it can get at its data files, if any.
+    pwd = os.getcwd()
+    old_sys_path = list(sys.path)
+    if code_path is not None:
+        dirname = os.path.abspath(os.path.dirname(code_path))
+        os.chdir(dirname)
+        sys.path.insert(0, dirname)
+
+    # Redirect stdout so print output from the example is swallowed.
+    stdout = sys.stdout
+    sys.stdout = cStringIO.StringIO()
+
+    # Reset sys.argv so the example sees itself as the invoked script.
+    old_sys_argv = sys.argv
+    sys.argv = [code_path]
+
+    try:
+        try:
+            code = unescape_doctest(code)
+            if ns is None:
+                ns = {}
+            if not ns:
+                # Fresh namespace: run the configured plot_pre_code
+                # (typically imports) before the example itself.
+                exec setup.config.plot_pre_code in ns
+            exec code in ns
+        except (Exception, SystemExit), err:
+            raise PlotError(traceback.format_exc())
+    finally:
+        # Always restore interpreter state, even on failure.
+        os.chdir(pwd)
+        sys.argv = old_sys_argv
+        sys.path[:] = old_sys_path
+        sys.stdout = stdout
+    return ns
+
+
+#------------------------------------------------------------------------------
+# Generating figures
+#------------------------------------------------------------------------------
+
+def out_of_date(original, derived):
+    """
+    Returns True if derivative is out-of-date wrt original,
+    both of which are full file paths.
+    """
+    # A missing derived file counts as out-of-date; otherwise compare
+    # modification times.
+    return (not os.path.exists(derived)
+            or os.stat(derived).st_mtime < os.stat(original).st_mtime)
+
+
+def makefig(code, code_path, output_dir, output_base, config):
+    """
+    Run a pyplot script *code* and save the images under *output_dir*
+    with file names derived from *output_base*
+
+    Returns a list of (code_piece, [image, ...]) tuples, one entry per
+    chunk of *code* (split at plt.show()).  Images that already exist
+    and are newer than *code_path* are reused instead of re-running the
+    script.  ``ImageFile`` is defined elsewhere in this file -- not
+    visible here.
+    """
+
+    # -- Parse format list
+    # Default DPI per output format, used when config.plot_formats
+    # supplies only a format name instead of a (name, dpi) pair.
+    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
+    formats = []
+    for fmt in config.plot_formats:
+        if isinstance(fmt, str):
+            formats.append((fmt, default_dpi.get(fmt, 80)))
+        elif type(fmt) in (tuple, list) and len(fmt)==2:
+            formats.append((str(fmt[0]), int(fmt[1])))
+        else:
+            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
+
+    # -- Try to determine if all images already exist
+
+    code_pieces = split_code_at_show(code)
+
+    # Look for single-figure output files first
+    all_exists = True
+    img = ImageFile(output_base, output_dir)
+    for format, dpi in formats:
+        if out_of_date(code_path, img.filename(format)):
+            all_exists = False
+            break
+        img.formats.append(format)
+
+    if all_exists:
+        # Everything is up to date; skip running the code entirely.
+        return [(code, [img])]
+
+    # Then look for multi-figure output files
+    # (named <base>_<piece>_<figure> with two-digit indices).
+    results = []
+    all_exists = True
+    for i, code_piece in enumerate(code_pieces):
+        images = []
+        for j in xrange(1000):
+            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
+            for format, dpi in formats:
+                if out_of_date(code_path, img.filename(format)):
+                    all_exists = False
+                    break
+                img.formats.append(format)
+
+            # assume that if we have one, we have them all
+            if not all_exists:
+                # j == 0 missing means this piece has no figures at all.
+                all_exists = (j > 0)
+                break
+            images.append(img)
+        if not all_exists:
+            break
+        results.append((code_piece, images))
+
+    if all_exists:
+        return results
+
+    # -- We didn't find the files, so build them
+
+    results = []
+    # A single namespace shared across pieces so later pieces see
+    # variables defined by earlier ones.
+    ns = {}
+
+    for i, code_piece in enumerate(code_pieces):
+        # Clear between runs
+        plt.close('all')
+
+        # Run code
+        run_code(code_piece, code_path, ns)
+
+        # Collect images
+        images = []
+        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+        for j, figman in enumerate(fig_managers):
+            # Use the plain base name only in the single-figure,
+            # single-piece case; otherwise index by piece and figure.
+            if len(fig_managers) == 1 and len(code_pieces) == 1:
+                img = ImageFile(output_base, output_dir)
+            else:
+                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
+                                output_dir)
+            images.append(img)
+            for format, dpi in formats:
+                try:
+                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
+                except exceptions.BaseException, err:
+                    raise PlotError(traceback.format_exc())
+                img.formats.append(format)
+
+        # Results
+        results.append((code_piece, images))
+
+    return results
+
+
+#------------------------------------------------------------------------------
+# Relative pathnames
+#------------------------------------------------------------------------------
+
+try:
+    from os.path import relpath
+except ImportError:
+    # os.path.relpath appeared in Python 2.6; this is a fallback for
+    # older interpreters.
+    def relpath(target, base=os.curdir):
+        """
+        Return a relative path to the target from either the current
+        dir or an optional base dir. Base can be a directory
+        specified either as absolute or relative to current dir.
+        """
+
+        if not os.path.exists(target):
+            raise OSError, 'Target does not exist: '+target
+
+        if not os.path.isdir(base):
+            raise OSError, 'Base is not a directory or does not exist: '+base
+
+        base_list = (os.path.abspath(base)).split(os.sep)
+        target_list = (os.path.abspath(target)).split(os.sep)
+
+        # On the windows platform the target may be on a completely
+        # different drive from the base.
+        if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
+            raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
+
+        # Starting from the filepath root, work out how much of the
+        # filepath is shared by base and target.
+        for i in range(min(len(base_list), len(target_list))):
+            if base_list[i] <> target_list[i]: break
+        # NOTE(review): the increment below is in the loop's ``else``
+        # clause, so it runs only when the loop did NOT break -- the
+        # original comment claims it covers both cases.  Confirm
+        # against os.path.relpath semantics before changing.
+        else:
+            # If we broke out of the loop, i is pointing to the first
+            # differing path elements. If we didn't break out of the
+            # loop, i is pointing to identical path elements.
+            # Increment i so that in all cases it points to the first
+            # differing path elements.
+            i+=1
+
+        rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
+        return os.path.join(*rel_list)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/setup.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,31 @@
+from distutils.core import setup
+import setuptools
+import sys, os
+
+version = "0.4"
+
+# Packaging metadata for the (modified) numpydoc Sphinx extension.
+# NOTE(review): package_data values are conventionally *lists* of
+# patterns, e.g. {'numpydoc': ['tests']}; the plain-string values here
+# may not behave as intended -- confirm against the setuptools docs.
+setup(
+    name="numpydoc",
+    packages=["numpydoc"],
+    package_dir={"numpydoc": ""},
+    version=version,
+    description="Sphinx extension to support docstrings in Numpy format",
+    # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+    classifiers=["Development Status :: 3 - Alpha",
+                 "Environment :: Plugins",
+                 "License :: OSI Approved :: BSD License",
+                 "Topic :: Documentation"],
+    keywords="sphinx numpy",
+    author="Pauli Virtanen and others",
+    author_email="pav at iki.fi",
+    url="http://github.com/numpy/numpy/tree/master/doc/sphinxext",
+    license="BSD",
+    zip_safe=False,
+    install_requires=["Sphinx >= 1.0.1"],
+    package_data={'numpydoc': 'tests', '': ''},
+    entry_points={
+        "console_scripts": [
+            "autosummary_generate = numpydoc.autosummary_generate:main",
+        ],
+    },
+)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/extensions/numpydocmod/traitsdoc.py Fri Mar 25 01:16:23 2011 -0400
@@ -0,0 +1,140 @@
+"""
+=========
+traitsdoc
+=========
+
+Sphinx extension that handles docstrings in the Numpy standard format, [1]
+and supports Traits [2].
+
+This extension can be used as a replacement for ``numpydoc`` when support
+for Traits is required.
+
+.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [2] http://code.enthought.com/projects/traits/
+
+"""
+
+import inspect
+import os
+import pydoc
+
+import docscrape
+import docscrape_sphinx
+from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
+
+import numpydoc
+
+import comment_eater
+
+class SphinxTraitsDoc(SphinxClassDoc):
+    """SphinxClassDoc variant with extra parsed sections ('Traits',
+    'Description', 'Example') for Enthought Traits classes."""
+
+    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        # Normalise the module prefix to end with a dot.
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        docstring = pydoc.getdoc(cls)
+        docstring = docstring.split('\n')
+
+        # De-indent paragraph
+        try:
+            indent = min(len(s) - len(s.lstrip()) for s in docstring
+                         if s.strip())
+        except ValueError:
+            # Every line was blank: nothing to de-indent.
+            indent = 0
+
+        for n,line in enumerate(docstring):
+            docstring[n] = docstring[n][indent:]
+
+        # Section store; includes Traits/Description/Example keys that
+        # plain numpydoc does not have.
+        self._doc = docscrape.Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': '',
+            'Description': [],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Traits': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'References': '',
+            'Example': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def _str_summary(self):
+        # Summary section followed by a blank separator line.
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        # Merge Description into the extended summary, plus a blank line.
+        return self['Description'] + self['Extended Summary'] + ['']
+
+    def __str__(self, indent=0, func_role="func"):
+        """Render the parsed docstring back to reST, section by section."""
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Traits', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_see_also("obj")
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Example')
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+def looks_like_issubclass(obj, classname):
+    """ Return True if the object has a class or superclass with the given class
+    name.
+
+    Ignores old-style classes.
+    """
+    t = obj
+    if t.__name__ == classname:
+        return True
+    # __mro__ exists only on new-style classes -- hence "ignores
+    # old-style classes" above.
+    for klass in t.__mro__:
+        if klass.__name__ == classname:
+            return True
+    return False
+
+def get_doc_object(obj, what=None, config=None):
+    """Return the numpydoc docstring wrapper appropriate for *obj*;
+    HasTraits subclasses additionally get a populated 'Traits' section."""
+    if what is None:
+        # Infer what kind of object is being documented.
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        # NOTE(review): SphinxTraitsDoc.__init__ (defined above) takes
+        # no ``config`` keyword, so this call looks like it would raise
+        # TypeError -- confirm against the docscrape_sphinx base class.
+        doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
+        if looks_like_issubclass(obj, 'HasTraits'):
+            for name, trait, comment in comment_eater.get_class_traits(obj):
+                # Exclude private traits.
+                if not name.startswith('_'):
+                    doc['Traits'].append((name, trait, comment.splitlines()))
+        return doc
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '', config=config)
+    else:
+        # Modules and plain objects: wrap the raw docstring text.
+        return SphinxDocString(pydoc.getdoc(obj), config=config)
+
+def setup(app):
+    """Sphinx extension entry point: delegate to numpydoc, substituting
+    the Traits-aware get_doc_object defined above."""
+    # init numpydoc
+    numpydoc.setup(app, get_doc_object)
+
http://bitbucket.org/yt_analysis/yt-doc/changeset/2199c3adee48/
changeset: r53:2199c3adee48
user: MatthewTurk
date: 2011-03-25 06:17:44
summary: Fixed URLs
affected #: 34 files (170 bytes)
--- a/source/cookbook/aligned_cutting_plane.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/aligned_cutting_plane.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
vector in a sphere, and then use that to take an oblique slice. See
:ref:`derived-quantities` and :ref:`methods-cutting-planes` for more information.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/aligned_cutting_plane.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/aligned_cutting_plane.py .
.. code-block:: python
--- a/source/cookbook/arbitrary_vectors_on_slice.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/arbitrary_vectors_on_slice.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
through it, and add some extra vectors on top. Here we've used the imaginary
fields ``magnetic_field_x``, ``magnetic_field_y`` and ``magnetic_field_z``.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/arbitrary_vectors_on_slice.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/arbitrary_vectors_on_slice.py .
.. code-block:: python
--- a/source/cookbook/average_value.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/average_value.inc Fri Mar 25 01:17:44 2011 -0400
@@ -8,7 +8,7 @@
multiple CPUs if executed with mpirun and supplied the --parallel command line
argument.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/average_value.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/average_value.py .
.. code-block:: python
--- a/source/cookbook/contours_on_slice.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/contours_on_slice.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This is a simple recipe to show how to open a dataset, plot a slice
through it, and add contours of another quantity on top.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/contours_on_slice.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/contours_on_slice.py .
.. code-block:: python
--- a/source/cookbook/extract_fixed_resolution_data.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/extract_fixed_resolution_data.inc Fri Mar 25 01:17:44 2011 -0400
@@ -8,7 +8,7 @@
recipe shows how to insert a dataset into an external HDF5 file using h5py.
For more information see :class:`covering_grid`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/extract_fixed_resolution_data.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/extract_fixed_resolution_data.py .
.. code-block:: python
--- a/source/cookbook/find_clumps.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/find_clumps.inc Fri Mar 25 01:17:44 2011 -0400
@@ -9,7 +9,7 @@
found in astro-ph/0806.1653. For more information, see
:ref:`methods-contours`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/find_clumps.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/find_clumps.py .
.. code-block:: python
--- a/source/cookbook/global_phase_plots.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/global_phase_plots.inc Fri Mar 25 01:17:44 2011 -0400
@@ -8,7 +8,7 @@
of multiple CPUs if executed with mpirun and supplied the --parallel command
line argument. For more information, see :ref:`methods-profiles`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/global_phase_plots.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/global_phase_plots.py .
.. code-block:: python
--- a/source/cookbook/halo_finding.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/halo_finding.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This script shows the simplest way of getting halo information. For more
information, see :ref:`halo_finding`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/halo_finding.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/halo_finding.py .
.. code-block:: python
--- a/source/cookbook/halo_mass_info.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/halo_mass_info.inc Fri Mar 25 01:17:44 2011 -0400
@@ -8,7 +8,7 @@
supplied the --parallel command line argument. For more information, see
:ref:`halo_finding`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/halo_mass_info.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/halo_mass_info.py .
.. code-block:: python
--- a/source/cookbook/halo_particle_plotting.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/halo_particle_plotting.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This is a simple mechanism for overplotting the particles belonging only to
halos. For more information, see :ref:`halo_finding`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/halo_particle_plotting.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/halo_particle_plotting.py .
.. code-block:: python
--- a/source/cookbook/halo_plotting.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/halo_plotting.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This is a mechanism for plotting circles representing identified particle halos
on an image. For more information, see :ref:`halo_finding`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/halo_plotting.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/halo_plotting.py .
.. code-block:: python
--- a/source/cookbook/light_cone_halo_mask.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/light_cone_halo_mask.inc Fri Mar 25 01:17:44 2011 -0400
@@ -10,7 +10,7 @@
positions in the light cone projection of all the halos in the mask, their
redshifts, virial radii, and virial masses.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/light_cone_halo_mask.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/light_cone_halo_mask.py .
.. code-block:: python
--- a/source/cookbook/make_light_cone.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/make_light_cone.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
The following recipe will make a light cone projection (see :ref:`light-cone-generator`)
of a single quantity over the redshift interval 0 to 0.4.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/make_light_cone.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/make_light_cone.py .
.. code-block:: python
--- a/source/cookbook/multi_plot.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/multi_plot.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
through it, centered at its most dense point. For more information, see
:func:`~yt.visualization.plot_collection.get_multi_plot`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/multi_plot.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/multi_plot.py .
.. code-block:: python
--- a/source/cookbook/multi_plot_3x2.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/multi_plot_3x2.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
through it, centered at its most dense point. For more information, see
:func:`~yt.visualization.plot_collection.get_multi_plot`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/multi_plot_3x2.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/multi_plot_3x2.py .
.. code-block:: python
--- a/source/cookbook/multi_width_save.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/multi_width_save.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
different widths, ensuring that across the plots we have the same min/max for
the colorbar.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/multi_width_save.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/multi_width_save.py .
.. code-block:: python
--- a/source/cookbook/offaxis_projection.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/offaxis_projection.inc Fri Mar 25 01:17:44 2011 -0400
@@ -10,7 +10,7 @@
Additionally, for the purposes of the recipe, we have simplified the image
considerably.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/offaxis_projection.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/offaxis_projection.py .
.. code-block:: python
--- a/source/cookbook/overplot_particles.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/overplot_particles.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
through it, and add particles on top. For more information see
:ref:`callbacks`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/overplot_particles.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/overplot_particles.py .
.. code-block:: python
--- a/source/cookbook/run_halo_profiler.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/run_halo_profiler.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
within a cosmological simulation. See :ref:`halo_profiling` for full documentation
of the HaloProfiler.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/run_halo_profiler.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/run_halo_profiler.py .
.. code-block:: python
--- a/source/cookbook/simple_pdf.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_pdf.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
distribution represents the fraction of the total mass rather than
absolute mass, you must include the "fractional" keyword.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_pdf.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_pdf.py .
.. code-block:: python
--- a/source/cookbook/simple_phase.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_phase.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This is a simple recipe to show how to open a dataset and then plot a phase
plot showing mass distribution in the rho-T plane.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_phase.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_phase.py .
.. code-block:: python
--- a/source/cookbook/simple_profile.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_profile.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
profile showing mass-weighted average Temperature as a function of Density
inside a sphere.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_profile.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_profile.py .
.. code-block:: python
--- a/source/cookbook/simple_projection.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_projection.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
weighted-average projection through it, centered at its most dense point. For
more information see :ref:`methods-projections`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_projection.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_projection.py .
.. code-block:: python
--- a/source/cookbook/simple_radial_profile.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_radial_profile.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
profile showing mass-weighted average Density inside a sphere. For more
information, see :ref:`methods-profiles`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_radial_profile.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_radial_profile.py .
.. code-block:: python
--- a/source/cookbook/simple_slice.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_slice.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
through it, centered at its most dense point. For more information, see
:ref:`methods-slices`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_slice.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_slice.py .
.. code-block:: python
--- a/source/cookbook/simple_volume_rendering.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simple_volume_rendering.inc Fri Mar 25 01:17:44 2011 -0400
@@ -12,7 +12,7 @@
Additionally, for the purposes of the recipe, we have simplified the image
considerably.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simple_volume_rendering.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simple_volume_rendering.py .
.. code-block:: python
--- a/source/cookbook/simulation_halo_profiler.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/simulation_halo_profiler.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
The following recipe will run the HaloProfiler (see :ref:`halo_profiling`) on
all the datasets in one simulation between z = 10 and 0.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/simulation_halo_profiler.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/simulation_halo_profiler.py .
.. code-block:: python
--- a/source/cookbook/sum_mass_in_sphere.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/sum_mass_in_sphere.inc Fri Mar 25 01:17:44 2011 -0400
@@ -9,7 +9,7 @@
supplied the --parallel command line argument. For more information, see
:ref:`derived-quantities`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/sum_mass_in_sphere.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/sum_mass_in_sphere.py .
.. code-block:: python
--- a/source/cookbook/thin_slice_projection.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/thin_slice_projection.inc Fri Mar 25 01:17:44 2011 -0400
@@ -7,7 +7,7 @@
weighted-average projection through it, but only through a very thin slice of
the region. For more information see :ref:`methods-projections`.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/thin_slice_projection.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/thin_slice_projection.py .
.. code-block:: python
--- a/source/cookbook/time_series_phase.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/time_series_phase.inc Fri Mar 25 01:17:44 2011 -0400
@@ -9,7 +9,7 @@
If run with mpirun and the --parallel flag, this will take advantage of
multiple processors.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/time_series_phase.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/time_series_phase.py .
.. code-block:: python
--- a/source/cookbook/time_series_quantity.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/time_series_quantity.inc Fri Mar 25 01:17:44 2011 -0400
@@ -9,7 +9,7 @@
If run with mpirun and the --parallel flag, this will take advantage of
multiple processors.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/time_series_quantity.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/time_series_quantity.py .
.. code-block:: python
--- a/source/cookbook/unique_light_cones.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/unique_light_cones.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
The following recipe will create 15 light cone projections that have
at most 10% volume in common with each other.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/unique_light_cones.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/unique_light_cones.py .
.. code-block:: python
--- a/source/cookbook/velocity_vectors_on_slice.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/velocity_vectors_on_slice.inc Fri Mar 25 01:17:44 2011 -0400
@@ -6,7 +6,7 @@
This is a simple recipe to show how to open a dataset, plot a slice
through it, and add velocity vectors on top.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/velocity_vectors_on_slice.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/velocity_vectors_on_slice.py .
.. code-block:: python
--- a/source/cookbook/zoomin_frames.inc Fri Mar 25 01:16:23 2011 -0400
+++ b/source/cookbook/zoomin_frames.inc Fri Mar 25 01:17:44 2011 -0400
@@ -8,7 +8,7 @@
recipe is provided to show how to be more flexible and add annotations and the
like -- the base system, of a zoomin, is provided by the ``yt zoomin`` command.
-The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw-file/tip/recipes/zoomin_frames.py .
+The latest version of this recipe can be downloaded here: http://hg.enzotools.org/cookbook/raw/tip/recipes/zoomin_frames.py .
.. code-block:: python
http://bitbucket.org/yt_analysis/yt-doc/changeset/db822e39769d/
changeset: r54:db822e39769d
user: MatthewTurk
date: 2011-03-25 06:37:52
summary: Adding numpydocmod to extensions
affected #: 1 file (15 bytes)
--- a/source/conf.py Fri Mar 25 01:17:44 2011 -0400
+++ b/source/conf.py Fri Mar 25 01:37:52 2011 -0400
@@ -16,7 +16,7 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath('../extensions/'))
# -- General configuration -----------------------------------------------------
@@ -27,7 +27,7 @@
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
- 'sphinx.ext.autosummary', 'numpydoc',
+ 'sphinx.ext.autosummary', 'numpydocmod',
]
# Add any paths that contain templates here, relative to this directory.
http://bitbucket.org/yt_analysis/yt-doc/changeset/db6504f17b4e/
changeset: r55:db6504f17b4e
user: MatthewTurk
date: 2011-04-04 01:30:52
summary: Adding HEALpix docs
affected #: 1 file (2.9 KB)
--- a/source/visualizing/volume_rendering.rst Fri Mar 25 01:37:52 2011 -0400
+++ b/source/visualizing/volume_rendering.rst Sun Apr 03 19:30:52 2011 -0400
@@ -229,5 +229,73 @@
:meth:`~yt.visualization.volume_rendering.camera.StereoPairCamera.split`, that
will return two cameras, a left and a right.
-Running in Parallel
--------------------
+HEALPix Volume Rendering
+------------------------
+
+yt now comes with a volume rendering module that casts rays out in all
+directions from a central location, according to the equal-area iso-latitude
+pixelization mechanism, `HEALPix <http://healpix.jpl.nasa.gov/>`_. This can be
+used to generate all-sky column density maps as well as planetarium-ready
+visualizations.
+
+Unfortunately, due to spherical-projection issues, the generation of
+the initial volume rendering is much easier than the generation of the output
+image from the process.
+
+To actually issue the rays from a central location, the call is similar but not
+identical to the creation of a standard volume rendering.
+
+.. code-block:: python
+
+ from yt.mods import *
+ import yt.visualization.volume_rendering.camera as camera
+
+ Nside = 32
+ pf = load("DD0008/galaxy0008")
+ cam = camera.HEALpixCamera([0.5,0.5,0.5], 0.2, Nside,
+ pf = pf, log_fields = [False])
+ bitmap = cam.snapshot()
+
+The returned bitmap will, as per usual, be an array of integrated values.
+Because we're using the projection transfer function with the HEALPix camera,
+it will be an ordered pixel list of shape (12 times Nside times Nside, 1, 4)
+where the first channel is ordered in order of pixels as per the HEALpix
+notation. We now have to convert this to a regularly gridded set of values,
+between 0 and 2pi and 0 and pi, for the theta and phi coordinates.
+
+yt provides a helper function to go from pixel ID to angle (as well as a few
+other things). You can access this helper function in this manner:
+
+.. code-block:: python
+
+ import yt.utilities.amr_utils as au
+ from numpy import pi
+ phi, theta = na.mgrid[0.0:2*pi:800j, 0:pi:800j]
+ pixi = au.arr_ang2pix_nest(Nside, theta.ravel(), phi.ravel())
+ img = na.log10(bitmap[:,0,0][pixi]).reshape((800,800))
+
+The call to mgrid creates a regularly-spaced mesh of values. We then ask
+HEALpix what the pixel IDs are that fall into each of these regularly spaced
+mesh values, and then we apply those pixels in that order. This transformation
+will, someday, be implicit in the snapshot() call.
+
+At this point we can plot our regularly spaced mesh using one of several
+projections. We'll do the Mollweide projection. To do this, we import the
+appropriate Matplotlib components and plot using the imshow command:
+
+.. code-block:: python
+
+ import matplotlib.figure
+ import matplotlib.backends.backend_agg
+
+ fig = matplotlib.figure.Figure((10, 5))
+ ax = fig.add_subplot(1,1,1,projection='mollweide')
+ image = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
+ cb = fig.colorbar(image, orientation='horizontal')
+
+ cb.set_label(r"$\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+ canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
+ canvas.print_figure("allsky.png")
+
+As it stands, this is still a bit do-it-yourself. Improvements and suggestions
+would be welcomed!
http://bitbucket.org/yt_analysis/yt-doc/changeset/d444e5d4798f/
changeset: r56:d444e5d4798f
user: MatthewTurk
date: 2011-04-04 02:18:43
summary: Merge
affected #: 1 file (3.5 KB)
--- a/source/analysis_modules/clump_finding.rst Sun Apr 03 19:30:52 2011 -0400
+++ b/source/analysis_modules/clump_finding.rst Sun Apr 03 20:18:43 2011 -0400
@@ -24,3 +24,94 @@
Once the clump-finder has finished, the user can write out a set of quantities for each clump in the
hierarchy. Additional info items can also be added. We also provide a recipe
for finding clumps in :ref:`cookbook-find_clumps`.
+
+Treecode Optimization
+---------------------
+
+.. sectionauthor:: Stephen Skory <s at skory.us>
+.. versionadded:: 2.1
+
+As mentioned above, the user has the option to limit clumps to those that are
+gravitationally bound.
+The correct and accurate way to calculate if a clump is gravitationally
+bound is to do the full double sum:
+
+.. math::
+
+ PE = \sum_{i=1}^{N} \sum_{j=i+1}^{N} \frac{G M_i M_j}{r_{ij}}
+
+where PE is the gravitational potential energy of N cells, G is the
+gravitational constant, M_i is the mass of cell i, and r_{ij} is the distance
+between cell i and j. The number of calculations required for this calculation
+grows with the square of N. Therefore, for large clumps with many cells, the
+test for boundedness can take a significant amount of time.
+
+An effective way to greatly speed up this calculation with minimal error
+is to use the treecode approximation pioneered by
+`Barnes and Hut (1986) <http://adsabs.harvard.edu/abs/1986Natur.324..446B>`_.
+This method of calculating gravitational potentials works by
+grouping individual masses that are located close together into a larger conglomerated
+mass with a geometric size equal to the distribution of the individual masses.
+For a mass cell that is sufficiently distant from the conglomerated mass,
+the gravitational calculation can be made using the conglomerate, rather than
+each individual mass, which saves time.
+
+The decision whether or not to use a conglomerate depends on the accuracy control
+parameter ``opening_angle``. Using the small-angle approximation, a conglomerate
+may be used if its geometric size subtends an angle no greater than the
+``opening_angle`` upon the remote mass. The default value is
+``opening_angle = 1``, which gives errors well under 1%. A value of
+``opening_angle = 0`` is identical to the full O(N^2) method, and larger values
+will speed up the calculation and sacrifice accuracy.
+
+The treecode method is iterative. Conglomerates may themselves form larger
+conglomerates. And if a larger conglomerate does not meet the ``opening_angle``
+criterion, the smaller conglomerates are tested as well. This iteration of
+conglomerates will
+cease once the level of the original masses is reached (this is what happens
+if ``opening_angle = 0``).
+
+Below are some examples of how to control the usage of the treecode.
+
+This example will calculate the ratio of the potential energy to kinetic energy
+for a spherical clump using the treecode method with an opening angle of 2.
+The default opening angle is 1.0:
+
+.. code-block:: python
+
+ from yt.mods import *
+
+ pf = load("DD0000")
+ sp = pf.h.sphere([0.5, 0.5, 0.5], radius=0.1)
+
+ ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
+ treecode=True, opening_angle=2.0)
+
+This example will accomplish the same as the above, but will use the full
+N^2 method.
+
+.. code-block:: python
+
+ from yt.mods import *
+
+ pf = load("DD0000")
+ sp = pf.h.sphere([0.5, 0.5, 0.5], radius=0.1)
+
+ ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
+ treecode=False)
+
+Here the treecode method is specified for clump finding (this is the default).
+Please see the link above for the full example of how to find clumps (the
+trailing backslash is important!):
+
+.. code-block:: python
+
+ function_name = 'self.data.quantities["IsBound"](truncate=True, \
+ include_thermal_energy=True, treecode=True, opening_angle=2.0) > 1.0'
+ master_clump = amods.level_sets.Clump(data_source, None, field,
+ function=function_name)
+
+To turn off the treecode, simply set treecode=False in the
+example above.
+
+
Repository URL: https://bitbucket.org/yt_analysis/yt-doc/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list