[yt-svn] commit/yt: 6 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Feb 3 18:52:21 PST 2016
6 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/b06714b97d9b/
Changeset: b06714b97d9b
Branch: yt
User: ngoldbaum
Date: 2016-01-27 23:02:49+00:00
Summary: Ensure number of octs is an int to avoid float indexing error
Affected #: 1 file
diff -r 481d5a937fb8e369949bae0073a7777d71cc952f -r b06714b97d9bb5f256c8392c9df8d1882825ff66 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -93,7 +93,7 @@
if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
return arr
nz = self.nz
- n_oct = arr.shape[0] / (nz**3.0)
+ n_oct = arr.shape[0] / (nz**3)
if arr.size == nz*nz*nz*n_oct:
new_shape = (nz, nz, nz, n_oct)
elif arr.size == nz*nz*nz*n_oct * 3:
https://bitbucket.org/yt_analysis/yt/commits/6b981856498a/
Changeset: 6b981856498a
Branch: yt
User: ngoldbaum
Date: 2016-01-27 23:11:58+00:00
Summary: index using integers in AMR flagging method code
Affected #: 1 file
diff -r b06714b97d9bb5f256c8392c9df8d1882825ff66 -r 6b981856498ab4ee2e58ece46408a3310abf1907 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -91,7 +91,7 @@
self.sigs = []
for dim in range(3):
d1 = (dim + 1) % 3
- d2 = (dim == 0)
+ d2 = int(dim == 0)
self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
@property
@@ -118,7 +118,7 @@
def find_by_zero_signature(self, dim):
sig = self.sigs[dim]
- grid_ends = np.zeros((sig.size, 2))
+ grid_ends = np.zeros((sig.size, 2), dtype='int64')
ng = 0
i = 0
while i < sig.size:
https://bitbucket.org/yt_analysis/yt/commits/188ad2bd3ad0/
Changeset: 188ad2bd3ad0
Branch: yt
User: ngoldbaum
Date: 2016-01-27 23:18:52+00:00
Summary: correct use of np.random.normal in fake_particle_ds
Affected #: 1 file
diff -r 6b981856498ab4ee2e58ece46408a3310abf1907 -r 188ad2bd3ad0b50e94e71b31c8998644660445a8 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -262,7 +262,7 @@
data = {}
for field, offset, u in zip(fields, offsets, units):
if "position" in field:
- v = np.random.normal(npart, 0.5, 0.25)
+ v = np.random.normal(loc=0.5, scale=0.25, size=npart)
np.clip(v, 0.0, 1.0, v)
v = (np.random.random(npart) - offset)
data[field] = (v, u)
https://bitbucket.org/yt_analysis/yt/commits/c83799fa2415/
Changeset: c83799fa2415
Branch: yt
User: ngoldbaum
Date: 2016-01-28 02:04:37+00:00
Summary: index with integers in lens setup
Affected #: 1 file
diff -r 188ad2bd3ad0b50e94e71b31c8998644660445a8 -r c83799fa24157bab7c3efb8ee989a89d4e257989 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -313,7 +313,7 @@
def _get_positions_vectors(self, camera, disparity):
- single_resolution_x = np.floor(camera.resolution[0]) / 2
+ single_resolution_x = int(np.floor(camera.resolution[0]) / 2)
east_vec = camera.unit_vectors[0]
north_vec = camera.unit_vectors[1]
@@ -681,7 +681,7 @@
if self.disparity is None:
self.disparity = camera.width[0] / 1000.
- single_resolution_x = np.floor(camera.resolution[0])/2
+ single_resolution_x = int(np.floor(camera.resolution[0]) / 2)
px = np.linspace(-np.pi, np.pi, single_resolution_x,
endpoint=True)[:, None]
py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
https://bitbucket.org/yt_analysis/yt/commits/33795293975e/
Changeset: 33795293975e
Branch: yt
User: ngoldbaum
Date: 2016-02-03 21:50:53+00:00
Summary: Merging, clearing conflict
Affected #: 110 files
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -28,40 +28,37 @@
yt/utilities/spatial/ckdtree.c
yt/utilities/lib/alt_ray_tracers.c
yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
yt/utilities/lib/bitarray.c
-yt/utilities/lib/CICDeposit.c
-yt/utilities/lib/ContourFinding.c
-yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
yt/utilities/lib/element_mappings.c
-yt/utilities/lib/FixedInterpolator.c
yt/utilities/lib/fortran_reader.c
yt/utilities/lib/freetype_writer.c
yt/utilities/lib/geometry_utils.c
yt/utilities/lib/image_utilities.c
-yt/utilities/lib/Interpolators.c
+yt/utilities/lib/interpolators.c
yt/utilities/lib/kdtree.c
yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
-yt/utilities/lib/Octree.c
-yt/utilities/lib/GridTree.c
+yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/pixelization_routines.c
yt/utilities/lib/png_writer.c
-yt/utilities/lib/PointsInVolume.c
-yt/utilities/lib/QuadTree.c
-yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/VolumeIntegrator.c
yt/utilities/lib/grid_traversal.c
yt/utilities/lib/marching_cubes.c
yt/utilities/lib/png_writer.h
yt/utilities/lib/write_array.c
-yt/utilities/lib/element_mappings.c
-yt/utilities/lib/mesh_construction.cpp
-yt/utilities/lib/mesh_samplers.cpp
-yt/utilities/lib/mesh_traversal.cpp
-yt/utilities/lib/mesh_intersection.cpp
syntax: glob
*.pyc
.*.swp
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
- """Insert an evaluated notebook into a document
-
- This uses runipy and nbconvert to transform a path to an unevaluated notebook
- into html suitable for embedding in a Sphinx document.
- """
- required_arguments = 1
- optional_arguments = 1
- option_spec = {'skip_exceptions': directives.flag}
- final_argument_whitespace = True
-
- def run(self): # check if there are spaces in the notebook name
- nb_path = self.arguments[0]
- if ' ' in nb_path: raise ValueError(
- "Due to issues with docutils stripping spaces from links, white "
- "space is not allowed in notebook filenames '{0}'".format(nb_path))
- # check if raw html is supported
- if not self.state.document.settings.raw_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
-
- cwd = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- os.chdir(tmpdir)
-
- # get path to notebook
- nb_filename = self.arguments[0]
- nb_basename = os.path.basename(nb_filename)
- rst_file = self.state_machine.document.attributes['source']
- rst_dir = os.path.abspath(os.path.dirname(rst_file))
- nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
- # Move files around.
- rel_dir = os.path.relpath(rst_dir, setup.confdir)
- dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
- dest_path = os.path.join(dest_dir, nb_basename)
-
- image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
- # Ensure destination build directory exists
- thread_safe_mkdir(os.path.dirname(dest_path))
-
- # Copy unevaluated notebook
- shutil.copyfile(nb_abs_path, dest_path)
-
- # Construct paths to versions getting copied over
- dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
- dest_path_script = string.replace(dest_path, '.ipynb', '.py')
- rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
- rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
- # Create python script version
- script_text = nb_to_python(nb_abs_path)
- f = open(dest_path_script, 'w')
- f.write(script_text.encode('utf8'))
- f.close()
-
- skip_exceptions = 'skip_exceptions' in self.options
-
- ret = evaluate_notebook(
- nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
- try:
- evaluated_text, resources = ret
- evaluated_text = write_notebook_output(
- resources, image_dir, image_rel_dir, evaluated_text)
- except ValueError:
- # This happens when a notebook raises an unhandled exception
- evaluated_text = ret
-
- # Create link to notebook and script files
- link_rst = "(" + \
- formatted_link(nb_basename) + "; " + \
- formatted_link(rel_path_eval) + "; " + \
- formatted_link(rel_path_script) + \
- ")"
-
- self.state_machine.insert_input([link_rst], rst_file)
-
- # create notebook node
- attributes = {'format': 'html', 'source': 'nb_path'}
- nb_node = notebook_node('', evaluated_text, **attributes)
- (nb_node.source, nb_node.line) = \
- self.state_machine.get_source_and_line(self.lineno)
-
- # add dependency
- self.state.document.settings.record_dependencies.add(nb_abs_path)
-
- # clean up
- os.chdir(cwd)
- shutil.rmtree(tmpdir, True)
-
- return [nb_node]
-
-
-class notebook_node(nodes.raw):
- pass
-
-def nb_to_python(nb_path):
- """convert notebook to python script"""
- exporter = python.PythonExporter()
- output, resources = exporter.from_filename(nb_path)
- return output
-
-def nb_to_html(nb_path):
- """convert notebook to html"""
- c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
- exporter = html.HTMLExporter(template_file='full', config=c)
- notebook = nbformat.read(open(nb_path), 'json')
- output, resources = exporter.from_notebook_node(notebook)
- header = output.split('<head>', 1)[1].split('</head>',1)[0]
- body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
- # http://imgur.com/eR9bMRH
- header = header.replace('<style', '<style scoped="scoped"')
- header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n',
- '')
- header = header.replace("code,pre{", "code{")
-
- # Filter out styles that conflict with the sphinx theme.
- filter_strings = [
- 'navbar',
- 'body{',
- 'alert{',
- 'uneditable-input{',
- 'collapse{',
- ]
-
- filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
- line_begin = [
- 'pre{',
- 'p{margin'
- ]
-
- filterfunc = lambda x: not any([s in x for s in filter_strings])
- header_lines = filter(filterfunc, header.split('\n'))
-
- filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
- header_lines = filter(filterfunc, header_lines)
-
- header = '\n'.join(header_lines)
-
- # concatenate raw html lines
- lines = ['<div class="ipynotebook">']
- lines.append(header)
- lines.append(body)
- lines.append('</div>')
- return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
- # Create evaluated version and save it to the dest path.
- notebook = nbformat.read(open(nb_path), 'json')
- nb_runner = NotebookRunner(notebook, pylab=False)
- try:
- nb_runner.run_notebook(skip_exceptions=skip_exceptions)
- except NotebookError as e:
- print('')
- print(e)
- # Return the traceback, filtering out ANSI color codes.
- # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
- return "Notebook conversion failed with the " \
- "following traceback: \n%s" % \
- re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
- str(e))
-
- if dest_path is None:
- dest_path = 'temp_evaluated.ipynb'
- nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
- ret = nb_to_html(dest_path)
- if dest_path is 'temp_evaluated.ipynb':
- os.remove(dest_path)
- return ret
-
-def formatted_link(path):
- return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
- self.visit_raw(node)
-
-def depart_notebook_node(self, node):
- self.depart_raw(node)
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_node(notebook_node,
- html=(visit_notebook_node, depart_notebook_node))
-
- app.add_directive('notebook', NotebookDirective)
-
- retdict = dict(
- version='0.1',
- parallel_read_safe=True,
- parallel_write_safe=True
- )
-
- return retdict
-
-def make_image_dir(setup, rst_dir):
- image_dir = setup.app.builder.outdir + os.path.sep + '_images'
- rel_dir = os.path.relpath(setup.confdir, rst_dir)
- image_rel_dir = rel_dir + os.path.sep + '_images'
- thread_safe_mkdir(image_dir)
- return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
- my_uuid = uuid.uuid4().hex
-
- for output in resources['outputs']:
- new_name = image_dir + os.path.sep + my_uuid + output
- new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
- evaluated_text = evaluated_text.replace(output, new_relative_name)
- with open(new_name, 'wb') as f:
- f.write(resources['outputs'][output])
- return evaluated_text
-
-def thread_safe_mkdir(dirname):
- try:
- os.makedirs(dirname)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- pass
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
- notebook_node, visit_notebook_node, depart_notebook_node, \
- evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
- """Insert an evaluated notebook cell into a document
-
- This uses runipy and nbconvert to transform an inline python
- script into html suitable for embedding in a Sphinx document.
- """
- required_arguments = 0
- optional_arguments = 1
- has_content = True
- option_spec = {'skip_exceptions': directives.flag}
-
- def run(self):
- # check if raw html is supported
- if not self.state.document.settings.raw_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
-
- cwd = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- os.chdir(tmpdir)
-
- rst_file = self.state_machine.document.attributes['source']
- rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
- image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
- # Construct notebook from cell content
- content = "\n".join(self.content)
- with open("temp.py", "w") as f:
- f.write(content)
-
- convert_to_ipynb('temp.py', 'temp.ipynb')
-
- skip_exceptions = 'skip_exceptions' in self.options
-
- evaluated_text, resources = evaluate_notebook(
- 'temp.ipynb', skip_exceptions=skip_exceptions)
-
- evaluated_text = write_notebook_output(
- resources, image_dir, image_rel_dir, evaluated_text)
-
- # create notebook node
- attributes = {'format': 'html', 'source': 'nb_path'}
- nb_node = notebook_node('', evaluated_text, **attributes)
- (nb_node.source, nb_node.line) = \
- self.state_machine.get_source_and_line(self.lineno)
-
- # clean up
- os.chdir(cwd)
- shutil.rmtree(tmpdir, True)
-
- return [nb_node]
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_node(notebook_node,
- html=(visit_notebook_node, depart_notebook_node))
-
- app.add_directive('notebook-cell', NotebookCellDirective)
-
- retdict = dict(
- version='0.1',
- parallel_read_safe=True,
- parallel_write_safe=True
- )
-
- return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
- with io.open(py_file, 'r', encoding='utf-8') as f:
- notebook = current.reads(f.read(), format='py')
- with io.open(ipynb_file, 'w', encoding='utf-8') as f:
- current.write(notebook, f, format='ipynb')
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
- """ A comment block.
- """
- is_comment = True
- def __init__(self, start_lineno, end_lineno, text):
- # int : The first line number in the block. 1-indexed.
- self.start_lineno = start_lineno
- # int : The last line number. Inclusive!
- self.end_lineno = end_lineno
- # str : The text block including '#' character but not any leading spaces.
- self.text = text
-
- def add(self, string, start, end, line):
- """ Add a new comment line.
- """
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
- self.text += string
-
- def __repr__(self):
- return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno, self.text)
-
-
-class NonComment(object):
- """ A non-comment block of code.
- """
- is_comment = False
- def __init__(self, start_lineno, end_lineno):
- self.start_lineno = start_lineno
- self.end_lineno = end_lineno
-
- def add(self, string, start, end, line):
- """ Add lines to the block.
- """
- if string.strip():
- # Only add if not entirely whitespace.
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno)
-
-
-class CommentBlocker(object):
- """ Pull out contiguous comment blocks.
- """
- def __init__(self):
- # Start with a dummy.
- self.current_block = NonComment(0, 0)
-
- # All of the blocks seen so far.
- self.blocks = []
-
- # The index mapping lines of code to their associated comment blocks.
- self.index = {}
-
- def process_file(self, file):
- """ Process a file object.
- """
- for token in tokenize.generate_tokens(file.next):
- self.process_token(*token)
- self.make_index()
-
- def process_token(self, kind, string, start, end, line):
- """ Process a single token.
- """
- if self.current_block.is_comment:
- if kind == tokenize.COMMENT:
- self.current_block.add(string, start, end, line)
- else:
- self.new_noncomment(start[0], end[0])
- else:
- if kind == tokenize.COMMENT:
- self.new_comment(string, start, end, line)
- else:
- self.current_block.add(string, start, end, line)
-
- def new_noncomment(self, start_lineno, end_lineno):
- """ We are transitioning from a noncomment to a comment.
- """
- block = NonComment(start_lineno, end_lineno)
- self.blocks.append(block)
- self.current_block = block
-
- def new_comment(self, string, start, end, line):
- """ Possibly add a new comment.
-
- Only adds a new comment if this comment is the only thing on the line.
- Otherwise, it extends the noncomment block.
- """
- prefix = line[:start[1]]
- if prefix.strip():
- # Oops! Trailing comment, not a comment block.
- self.current_block.add(string, start, end, line)
- else:
- # A comment block.
- block = Comment(start[0], end[0], string)
- self.blocks.append(block)
- self.current_block = block
-
- def make_index(self):
- """ Make the index mapping lines of actual code to their associated
- prefix comments.
- """
- for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
- if not block.is_comment:
- self.index[block.start_lineno] = prev
-
- def search_for_comment(self, lineno, default=None):
- """ Find the comment block just before the given line number.
-
- Returns None (or the specified default) if there is no such block.
- """
- if not self.index:
- self.make_index()
- block = self.index.get(lineno, None)
- text = getattr(block, 'text', default)
- return text
-
-
-def strip_comment_marker(text):
- """ Strip # markers at the front of a block of comment text.
- """
- lines = []
- for line in text.splitlines():
- lines.append(line.lstrip('#'))
- text = textwrap.dedent('\n'.join(lines))
- return text
-
-
-def get_class_traits(klass):
- """ Yield all of the documentation for trait definitions on a class object.
- """
- # FIXME: gracefully handle errors here or in the caller?
- source = inspect.getsource(klass)
- cb = CommentBlocker()
- cb.process_file(StringIO(source))
- mod_ast = compiler.parse(source)
- class_ast = mod_ast.node.nodes[0]
- for node in class_ast.code.nodes:
- # FIXME: handle other kinds of assignments?
- if isinstance(node, compiler.ast.Assign):
- name = node.nodes[0].name
- rhs = unparse(node.expr).strip()
- doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
- yield name, rhs, doc
-
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
- The unparse method takes a compiler.ast tree and transforms it back into
- valid python code. It is incomplete and currently only works for
- import statements, function calls, function definitions, assignments, and
- basic expressions.
-
- Inspired by python-2.5-svn/Demo/parser/unparse.py
-
- fixme: We may want to move to using _ast trees because the compiler for
- them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
- s = cStringIO.StringIO()
- UnparseCompilerAst(ast, s, single_line_functions)
- return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
- 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
- """ Methods in this class recursively traverse an AST and
- output source code for the abstract syntax; original formatting
- is disregarded.
- """
-
- #########################################################################
- # object interface.
- #########################################################################
-
- def __init__(self, tree, file = sys.stdout, single_line_functions=False):
- """ Unparser(tree, file=sys.stdout) -> None.
-
- Print the source for tree to file.
- """
- self.f = file
- self._single_func = single_line_functions
- self._do_indent = True
- self._indent = 0
- self._dispatch(tree)
- self._write("\n")
- self.f.flush()
-
- #########################################################################
- # Unparser private interface.
- #########################################################################
-
- ### format, output, and dispatch methods ################################
-
- def _fill(self, text = ""):
- "Indent a piece of text, according to the current indentation level"
- if self._do_indent:
- self._write("\n"+" "*self._indent + text)
- else:
- self._write(text)
-
- def _write(self, text):
- "Append a piece of text to the current line."
- self.f.write(text)
-
- def _enter(self):
- "Print ':', and increase the indentation."
- self._write(": ")
- self._indent += 1
-
- def _leave(self):
- "Decrease the indentation level."
- self._indent -= 1
-
- def _dispatch(self, tree):
- "_dispatcher function, _dispatching tree type T to method _T."
- if isinstance(tree, list):
- for t in tree:
- self._dispatch(t)
- return
- meth = getattr(self, "_"+tree.__class__.__name__)
- if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
- return
- meth(tree)
-
-
- #########################################################################
- # compiler.ast unparsing methods.
- #
- # There should be one method per concrete grammar type. They are
- # organized in alphabetical order.
- #########################################################################
-
- def _Add(self, t):
- self.__binary_op(t, '+')
-
- def _And(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") and (")
- self._write(")")
-
- def _AssAttr(self, t):
- """ Handle assigning an attribute of an object
- """
- self._dispatch(t.expr)
- self._write('.'+t.attrname)
-
- def _Assign(self, t):
- """ Expression Assignment such as "a = 1".
-
- This only handles assignment in expressions. Keyword assignment
- is handled separately.
- """
- self._fill()
- for target in t.nodes:
- self._dispatch(target)
- self._write(" = ")
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write('; ')
-
- def _AssName(self, t):
- """ Name on left hand side of expression.
-
- Treat just like a name on the right side of an expression.
- """
- self._Name(t)
-
- def _AssTuple(self, t):
- """ Tuple on left hand side of an expression.
- """
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- def _AugAssign(self, t):
- """ +=,-=,*=,/=,**=, etc. operations
- """
-
- self._fill()
- self._dispatch(t.node)
- self._write(' '+t.op+' ')
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write(';')
-
- def _Bitand(self, t):
- """ Bit and operation.
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" & ")
-
- def _Bitor(self, t):
- """ Bit or operation
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" | ")
-
- def _CallFunc(self, t):
- """ Function call.
- """
- self._dispatch(t.node)
- self._write("(")
- comma = False
- for e in t.args:
- if comma: self._write(", ")
- else: comma = True
- self._dispatch(e)
- if t.star_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("*")
- self._dispatch(t.star_args)
- if t.dstar_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("**")
- self._dispatch(t.dstar_args)
- self._write(")")
-
- def _Compare(self, t):
- self._dispatch(t.expr)
- for op, expr in t.ops:
- self._write(" " + op + " ")
- self._dispatch(expr)
-
- def _Const(self, t):
- """ A constant value such as an integer value, 3, or a string, "hello".
- """
- self._dispatch(t.value)
-
- def _Decorators(self, t):
- """ Handle function decorators (eg. @has_units)
- """
- for node in t.nodes:
- self._dispatch(node)
-
- def _Dict(self, t):
- self._write("{")
- for i, (k, v) in enumerate(t.items):
- self._dispatch(k)
- self._write(": ")
- self._dispatch(v)
- if i < len(t.items)-1:
- self._write(", ")
- self._write("}")
-
- def _Discard(self, t):
- """ Node for when return value is ignored such as in "foo(a)".
- """
- self._fill()
- self._dispatch(t.expr)
-
- def _Div(self, t):
- self.__binary_op(t, '/')
-
- def _Ellipsis(self, t):
- self._write("...")
-
- def _From(self, t):
- """ Handle "from xyz import foo, bar as baz".
- """
- # fixme: Are From and ImportFrom handled differently?
- self._fill("from ")
- self._write(t.modname)
- self._write(" import ")
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Function(self, t):
- """ Handle function definitions
- """
- if t.decorators is not None:
- self._fill("@")
- self._dispatch(t.decorators)
- self._fill("def "+t.name + "(")
- defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
- for i, arg in enumerate(zip(t.argnames, defaults)):
- self._write(arg[0])
- if arg[1] is not None:
- self._write('=')
- self._dispatch(arg[1])
- if i < len(t.argnames)-1:
- self._write(', ')
- self._write(")")
- if self._single_func:
- self._do_indent = False
- self._enter()
- self._dispatch(t.code)
- self._leave()
- self._do_indent = True
-
- def _Getattr(self, t):
- """ Handle getting an attribute of an object
- """
- if isinstance(t.expr, (Div, Mul, Sub, Add)):
- self._write('(')
- self._dispatch(t.expr)
- self._write(')')
- else:
- self._dispatch(t.expr)
-
- self._write('.'+t.attrname)
-
- def _If(self, t):
- self._fill()
-
- for i, (compare,code) in enumerate(t.tests):
- if i == 0:
- self._write("if ")
- else:
- self._write("elif ")
- self._dispatch(compare)
- self._enter()
- self._fill()
- self._dispatch(code)
- self._leave()
- self._write("\n")
-
- if t.else_ is not None:
- self._write("else")
- self._enter()
- self._fill()
- self._dispatch(t.else_)
- self._leave()
- self._write("\n")
-
- def _IfExp(self, t):
- self._dispatch(t.then)
- self._write(" if ")
- self._dispatch(t.test)
-
- if t.else_ is not None:
- self._write(" else (")
- self._dispatch(t.else_)
- self._write(")")
-
- def _Import(self, t):
- """ Handle "import xyz.foo".
- """
- self._fill("import ")
-
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Keyword(self, t):
- """ Keyword value assignment within function calls and definitions.
- """
- self._write(t.name)
- self._write("=")
- self._dispatch(t.expr)
-
- def _List(self, t):
- self._write("[")
- for i,node in enumerate(t.nodes):
- self._dispatch(node)
- if i < len(t.nodes)-1:
- self._write(", ")
- self._write("]")
-
- def _Module(self, t):
- if t.doc is not None:
- self._dispatch(t.doc)
- self._dispatch(t.node)
-
- def _Mul(self, t):
- self.__binary_op(t, '*')
-
- def _Name(self, t):
- self._write(t.name)
-
- def _NoneType(self, t):
- self._write("None")
-
- def _Not(self, t):
- self._write('not (')
- self._dispatch(t.expr)
- self._write(')')
-
- def _Or(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") or (")
- self._write(")")
-
- def _Pass(self, t):
- self._write("pass\n")
-
- def _Printnl(self, t):
- self._fill("print ")
- if t.dest:
- self._write(">> ")
- self._dispatch(t.dest)
- self._write(", ")
- comma = False
- for node in t.nodes:
- if comma: self._write(', ')
- else: comma = True
- self._dispatch(node)
-
- def _Power(self, t):
- self.__binary_op(t, '**')
-
- def _Return(self, t):
- self._fill("return ")
- if t.value:
- if isinstance(t.value, Tuple):
- text = ', '.join([ name.name for name in t.value.asList() ])
- self._write(text)
- else:
- self._dispatch(t.value)
- if not self._do_indent:
- self._write('; ')
-
- def _Slice(self, t):
- self._dispatch(t.expr)
- self._write("[")
- if t.lower:
- self._dispatch(t.lower)
- self._write(":")
- if t.upper:
- self._dispatch(t.upper)
- #if t.step:
- # self._write(":")
- # self._dispatch(t.step)
- self._write("]")
-
- def _Sliceobj(self, t):
- for i, node in enumerate(t.nodes):
- if i != 0:
- self._write(":")
- if not (isinstance(node, Const) and node.value is None):
- self._dispatch(node)
-
- def _Stmt(self, tree):
- for node in tree.nodes:
- self._dispatch(node)
-
- def _Sub(self, t):
- self.__binary_op(t, '-')
-
- def _Subscript(self, t):
- self._dispatch(t.expr)
- self._write("[")
- for i, value in enumerate(t.subs):
- if i != 0:
- self._write(",")
- self._dispatch(value)
- self._write("]")
-
- def _TryExcept(self, t):
- self._fill("try")
- self._enter()
- self._dispatch(t.body)
- self._leave()
-
- for handler in t.handlers:
- self._fill('except ')
- self._dispatch(handler[0])
- if handler[1] is not None:
- self._write(', ')
- self._dispatch(handler[1])
- self._enter()
- self._dispatch(handler[2])
- self._leave()
-
- if t.else_:
- self._fill("else")
- self._enter()
- self._dispatch(t.else_)
- self._leave()
-
- def _Tuple(self, t):
-
- if not t.nodes:
- # Empty tuple.
- self._write("()")
- else:
- self._write("(")
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- self._write(")")
-
- def _UnaryAdd(self, t):
- self._write("+")
- self._dispatch(t.expr)
-
- def _UnarySub(self, t):
- self._write("-")
- self._dispatch(t.expr)
-
- def _With(self, t):
- self._fill('with ')
- self._dispatch(t.expr)
- if t.vars:
- self._write(' as ')
- self._dispatch(t.vars.name)
- self._enter()
- self._dispatch(t.body)
- self._leave()
- self._write('\n')
-
- def _int(self, t):
- self._write(repr(t))
-
- def __binary_op(self, t, symbol):
- # Check if parenthesis are needed on left side and then dispatch
- has_paren = False
- left_class = str(t.left.__class__)
- if (left_class in op_precedence.keys() and
- op_precedence[left_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.left)
- if has_paren:
- self._write(')')
- # Write the appropriate symbol for operator
- self._write(symbol)
- # Check if parenthesis are needed on the right side and then dispatch
- has_paren = False
- right_class = str(t.right.__class__)
- if (right_class in op_precedence.keys() and
- op_precedence[right_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.right)
- if has_paren:
- self._write(')')
-
- def _float(self, t):
- # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
- # We prefer str here.
- self._write(str(t))
-
- def _str(self, t):
- self._write(repr(t))
-
- def _tuple(self, t):
- self._write(str(t))
-
- #########################################################################
- # These are the methods from the _ast modules unparse.
- #
- # As our needs to handle more advanced code increase, we may want to
- # modify some of the methods below so that they work for compiler.ast.
- #########################################################################
-
-# # stmt
-# def _Expr(self, tree):
-# self._fill()
-# self._dispatch(tree.value)
-#
-# def _Import(self, t):
-# self._fill("import ")
-# first = True
-# for a in t.names:
-# if first:
-# first = False
-# else:
-# self._write(", ")
-# self._write(a.name)
-# if a.asname:
-# self._write(" as "+a.asname)
-#
-## def _ImportFrom(self, t):
-## self._fill("from ")
-## self._write(t.module)
-## self._write(" import ")
-## for i, a in enumerate(t.names):
-## if i == 0:
-## self._write(", ")
-## self._write(a.name)
-## if a.asname:
-## self._write(" as "+a.asname)
-## # XXX(jpe) what is level for?
-##
-#
-# def _Break(self, t):
-# self._fill("break")
-#
-# def _Continue(self, t):
-# self._fill("continue")
-#
-# def _Delete(self, t):
-# self._fill("del ")
-# self._dispatch(t.targets)
-#
-# def _Assert(self, t):
-# self._fill("assert ")
-# self._dispatch(t.test)
-# if t.msg:
-# self._write(", ")
-# self._dispatch(t.msg)
-#
-# def _Exec(self, t):
-# self._fill("exec ")
-# self._dispatch(t.body)
-# if t.globals:
-# self._write(" in ")
-# self._dispatch(t.globals)
-# if t.locals:
-# self._write(", ")
-# self._dispatch(t.locals)
-#
-# def _Print(self, t):
-# self._fill("print ")
-# do_comma = False
-# if t.dest:
-# self._write(">>")
-# self._dispatch(t.dest)
-# do_comma = True
-# for e in t.values:
-# if do_comma:self._write(", ")
-# else:do_comma=True
-# self._dispatch(e)
-# if not t.nl:
-# self._write(",")
-#
-# def _Global(self, t):
-# self._fill("global")
-# for i, n in enumerate(t.names):
-# if i != 0:
-# self._write(",")
-# self._write(" " + n)
-#
-# def _Yield(self, t):
-# self._fill("yield")
-# if t.value:
-# self._write(" (")
-# self._dispatch(t.value)
-# self._write(")")
-#
-# def _Raise(self, t):
-# self._fill('raise ')
-# if t.type:
-# self._dispatch(t.type)
-# if t.inst:
-# self._write(", ")
-# self._dispatch(t.inst)
-# if t.tback:
-# self._write(", ")
-# self._dispatch(t.tback)
-#
-#
-# def _TryFinally(self, t):
-# self._fill("try")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# self._fill("finally")
-# self._enter()
-# self._dispatch(t.finalbody)
-# self._leave()
-#
-# def _excepthandler(self, t):
-# self._fill("except ")
-# if t.type:
-# self._dispatch(t.type)
-# if t.name:
-# self._write(", ")
-# self._dispatch(t.name)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _ClassDef(self, t):
-# self._write("\n")
-# self._fill("class "+t.name)
-# if t.bases:
-# self._write("(")
-# for a in t.bases:
-# self._dispatch(a)
-# self._write(", ")
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _FunctionDef(self, t):
-# self._write("\n")
-# for deco in t.decorators:
-# self._fill("@")
-# self._dispatch(deco)
-# self._fill("def "+t.name + "(")
-# self._dispatch(t.args)
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _For(self, t):
-# self._fill("for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# def _While(self, t):
-# self._fill("while ")
-# self._dispatch(t.test)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# # expr
-# def _Str(self, tree):
-# self._write(repr(tree.s))
-##
-# def _Repr(self, t):
-# self._write("`")
-# self._dispatch(t.value)
-# self._write("`")
-#
-# def _Num(self, t):
-# self._write(repr(t.n))
-#
-# def _ListComp(self, t):
-# self._write("[")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write("]")
-#
-# def _GeneratorExp(self, t):
-# self._write("(")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write(")")
-#
-# def _comprehension(self, t):
-# self._write(" for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# for if_clause in t.ifs:
-# self._write(" if ")
-# self._dispatch(if_clause)
-#
-# def _IfExp(self, t):
-# self._dispatch(t.body)
-# self._write(" if ")
-# self._dispatch(t.test)
-# if t.orelse:
-# self._write(" else ")
-# self._dispatch(t.orelse)
-#
-# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-# def _UnaryOp(self, t):
-# self._write(self.unop[t.op.__class__.__name__])
-# self._write("(")
-# self._dispatch(t.operand)
-# self._write(")")
-#
-# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-# "FloorDiv":"//", "Pow": "**"}
-# def _BinOp(self, t):
-# self._write("(")
-# self._dispatch(t.left)
-# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-# self._dispatch(t.right)
-# self._write(")")
-#
-# boolops = {_ast.And: 'and', _ast.Or: 'or'}
-# def _BoolOp(self, t):
-# self._write("(")
-# self._dispatch(t.values[0])
-# for v in t.values[1:]:
-# self._write(" %s " % self.boolops[t.op.__class__])
-# self._dispatch(v)
-# self._write(")")
-#
-# def _Attribute(self,t):
-# self._dispatch(t.value)
-# self._write(".")
-# self._write(t.attr)
-#
-## def _Call(self, t):
-## self._dispatch(t.func)
-## self._write("(")
-## comma = False
-## for e in t.args:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## for e in t.keywords:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## if t.starargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("*")
-## self._dispatch(t.starargs)
-## if t.kwargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("**")
-## self._dispatch(t.kwargs)
-## self._write(")")
-#
-# # slice
-# def _Index(self, t):
-# self._dispatch(t.value)
-#
-# def _ExtSlice(self, t):
-# for i, d in enumerate(t.dims):
-# if i != 0:
-# self._write(': ')
-# self._dispatch(d)
-#
-# # others
-# def _arguments(self, t):
-# first = True
-# nonDef = len(t.args)-len(t.defaults)
-# for a in t.args[0:nonDef]:
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a)
-# for a,d in zip(t.args[nonDef:], t.defaults):
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a),
-# self._write("=")
-# self._dispatch(d)
-# if t.vararg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("*"+t.vararg)
-# if t.kwarg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("**"+t.kwarg)
-#
-## def _keyword(self, t):
-## self._write(t.arg)
-## self._write("=")
-## self._dispatch(t.value)
-#
-# def _Lambda(self, t):
-# self._write("lambda ")
-# self._dispatch(t.args)
-# self._write(": ")
-# self._dispatch(t.body)
-
-
-
diff -r c83799fa24157bab7c3efb8ee989a89d4e257989 -r 33795293975e3369d071528d82fa8291c240d251 doc/extensions/numpydocmod/docscrape.py
--- a/doc/extensions/numpydocmod/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
- """A line-based string reader.
-
- """
- def __init__(self, data):
- """
- Parameters
- ----------
- data : str
- String with lines separated by '\n'.
-
- """
- if isinstance(data,list):
- self._str = data
- else:
- self._str = data.split('\n') # store string as list of lines
-
- self.reset()
-
- def __getitem__(self, n):
- return self._str[n]
-
- def reset(self):
- self._l = 0 # current line nr
-
- def read(self):
- if not self.eof():
- out = self[self._l]
- self._l += 1
- return out
- else:
- return ''
-
- def seek_next_non_empty_line(self):
- for l in self[self._l:]:
- if l.strip():
- break
- else:
- self._l += 1
-
- def eof(self):
- return self._l >= len(self._str)
-
- def read_to_condition(self, condition_func):
- start = self._l
- for line in self[start:]:
- if condition_func(line):
- return self[start:self._l]
- self._l += 1
- if self.eof():
- return self[start:self._l+1]
- return []
-
- def read_to_next_empty_line(self):
- self.seek_next_non_empty_line()
- def is_empty(line):
- return not line.strip()
- return self.read_to_condition(is_empty)
-
- def read_to_next_unindented_line(self):
- def is_unindented(line):
- return (line.strip() and (len(line.lstrip()) == len(line)))
- return self.read_to_condition(is_unindented)
-
- def peek(self,n=0):
- if self._l + n < len(self._str):
- return self[self._l + n]
- else:
- return ''
-
- def is_empty(self):
- return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
- def __init__(self, docstring, config={}):
- docstring = textwrap.dedent(docstring).split('\n')
-
- self._doc = Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': [''],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Attributes': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'Warnings': [],
- 'References': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def __getitem__(self,key):
- return self._parsed_data[key]
-
- def __setitem__(self,key,val):
- if not self._parsed_data.has_key(key):
- warn("Unknown section %s" % key)
- else:
- self._parsed_data[key] = val
-
- def _is_at_section(self):
- self._doc.seek_next_non_empty_line()
-
- if self._doc.eof():
- return False
-
- l1 = self._doc.peek().strip() # e.g. Parameters
-
- if l1.startswith('.. index::'):
- return True
-
- l2 = self._doc.peek(1).strip() # ---------- or ==========
- return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
- def _strip(self,doc):
- i = 0
- j = 0
- for i,line in enumerate(doc):
- if line.strip(): break
-
- for j,line in enumerate(doc[::-1]):
- if line.strip(): break
-
- return doc[i:len(doc)-j]
-
- def _read_to_next_section(self):
- section = self._doc.read_to_next_empty_line()
-
- while not self._is_at_section() and not self._doc.eof():
- if not self._doc.peek(-1).strip(): # previous line was empty
- section += ['']
-
- section += self._doc.read_to_next_empty_line()
-
- return section
-
- def _read_sections(self):
- while not self._doc.eof():
- data = self._read_to_next_section()
- name = data[0].strip()
-
- if name.startswith('..'): # index section
- yield name, data[1:]
- elif len(data) < 2:
- yield StopIteration
- else:
- yield name, self._strip(data[2:])
-
- def _parse_param_list(self,content):
- r = Reader(content)
- params = []
- while not r.eof():
- header = r.read().strip()
- if ' : ' in header:
- arg_name, arg_type = header.split(' : ')[:2]
- else:
- arg_name, arg_type = header, ''
-
- desc = r.read_to_next_unindented_line()
- desc = dedent_lines(desc)
-
- params.append((arg_name,arg_type,desc))
-
- return params
-
-
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
- def _parse_see_also(self, content):
- """
- func_name : Descriptive text
- continued text
- another_func_name : Descriptive text
- func_name1, func_name2, :meth:`func_name`, func_name3
-
- """
- items = []
-
- def parse_item_name(text):
- """Match ':role:`name`' or 'name'"""
- m = self._name_rgx.match(text)
- if m:
- g = m.groups()
- if g[1] is None:
- return g[3], None
- else:
- return g[2], g[1]
- raise ValueError("%s is not a item name" % text)
-
- def push_item(name, rest):
- if not name:
- return
- name, role = parse_item_name(name)
- items.append((name, list(rest), role))
- del rest[:]
-
- current_func = None
- rest = []
-
- for line in content:
- if not line.strip(): continue
-
- m = self._name_rgx.match(line)
- if m and line[m.end():].strip().startswith(':'):
- push_item(current_func, rest)
- current_func, line = line[:m.end()], line[m.end():]
- rest = [line.split(':', 1)[1].strip()]
- if not rest[0]:
- rest = []
- elif not line.startswith(' '):
- push_item(current_func, rest)
- current_func = None
- if ',' in line:
- for func in line.split(','):
- if func.strip():
- push_item(func, [])
- elif line.strip():
- current_func = line
- elif current_func is not None:
- rest.append(line.strip())
- push_item(current_func, rest)
- return items
-
- def _parse_index(self, section, content):
- """
- .. index: default
- :refguide: something, else, and more
-
- """
- def strip_each_in(lst):
- return [s.strip() for s in lst]
-
- out = {}
- section = section.split('::')
- if len(section) > 1:
- out['default'] = strip_each_in(section[1].split(','))[0]
- for line in content:
- line = line.split(':')
- if len(line) > 2:
- out[line[1]] = strip_each_in(line[2].split(','))
- return out
-
- def _parse_summary(self):
- """Grab signature (if given) and summary"""
- if self._is_at_section():
- return
-
- summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
- self['Signature'] = summary_str
- if not self._is_at_section():
- self['Summary'] = self._doc.read_to_next_empty_line()
- else:
- self['Summary'] = summary
-
- if not self._is_at_section():
- self['Extended Summary'] = self._read_to_next_section()
-
- def _parse(self):
- self._doc.reset()
- self._parse_summary()
-
- for (section,content) in self._read_sections():
- if not section.startswith('..'):
- section = ' '.join([s.capitalize() for s in section.split(' ')])
- if section in ('Parameters', 'Returns', 'Raises', 'Warns',
- 'Other Parameters', 'Attributes', 'Methods'):
- self[section] = self._parse_param_list(content)
- elif section.startswith('.. index::'):
- self['index'] = self._parse_index(section, content)
- elif section == 'See Also':
- self['See Also'] = self._parse_see_also(content)
- else:
- self[section] = content
-
- # string conversion routines
-
- def _str_header(self, name, symbol='-'):
- return [name, len(name)*symbol]
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- if self['Signature']:
- return [self['Signature'].replace('*','\*')] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- if self['Summary']:
- return self['Summary'] + ['']
- else:
- return []
-
- def _str_extended_summary(self):
- if self['Extended Summary']:
- return self['Extended Summary'] + ['']
- else:
- return []
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- for param,param_type,desc in self[name]:
- out += ['%s : %s' % (param, param_type)]
- out += self._str_indent(desc)
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += self[name]
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- if not self['See Also']: return []
- out = []
- out += self._str_header("See Also")
- last_had_desc = True
- for func, desc, role in self['See Also']:
- if role:
- link = ':%s:`%s`' % (role, func)
- elif func_role:
- link = ':%s:`%s`' % (func_role, func)
- else:
- link = "`%s`_" % func
- if desc or last_had_desc:
- out += ['']
- out += [link]
- else:
- out[-1] += ", %s" % link
- if desc:
- out += self._str_indent([' '.join(desc)])
- last_had_desc = True
- else:
- last_had_desc = False
- out += ['']
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.iteritems():
- if section == 'default':
- continue
- out += [' :%s: %s' % (section, ', '.join(references))]
- return out
-
- def __str__(self, func_role=''):
- out = []
- out += self._str_signature()
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_section('Warnings')
- out += self._str_see_also(func_role)
- for s in ('Notes','References','Examples'):
- out += self._str_section(s)
- for param_list in ('Attributes', 'Methods'):
- out += self._str_param_list(param_list)
- out += self._str_index()
- return '\n'.join(out)
-
-
-def indent(str,indent=4):
- indent_str = ' '*indent
- if str is None:
- return indent_str
- lines = str.split('\n')
- return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
- """Deindent a list of lines maximally"""
- return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
- return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
- def __init__(self, func, role='func', doc=None, config={}):
- self._f = func
- self._role = role # e.g. "func" or "meth"
-
- if doc is None:
- if func is None:
- raise ValueError("No function or docstring given")
- doc = inspect.getdoc(func) or ''
- NumpyDocString.__init__(self, doc)
-
- if not self['Signature'] and func is not None:
- func, func_name = self.get_func()
- try:
- # try to read signature
- argspec = inspect.getargspec(func)
- argspec = inspect.formatargspec(*argspec)
- argspec = argspec.replace('*','\*')
- signature = '%s%s' % (func_name, argspec)
- except TypeError, e:
- signature = '%s()' % func_name
- self['Signature'] = signature
-
- def get_func(self):
- func_name = getattr(self._f, '__name__', self.__class__.__name__)
- if inspect.isclass(self._f):
- func = getattr(self._f, '__call__', self._f.__init__)
- else:
- func = self._f
- return func, func_name
-
- def __str__(self):
- out = ''
-
- func, func_name = self.get_func()
- signature = self['Signature'].replace('*', '\*')
-
- roles = {'func': 'function',
- 'meth': 'method'}
-
- if self._role:
- if not roles.has_key(self._role):
- print("Warning: invalid role %s" % self._role)
- out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
- func_name)
-
- out += super(FunctionDoc, self).__str__(func_role=self._role)
- return out
-
-
-class ClassDoc(NumpyDocString):
- def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
- config={}):
- if not inspect.isclass(cls) and cls is not None:
- raise ValueError("Expected a class or None, but got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
-
- if doc is None:
- if cls is None:
- raise ValueError("No class or documentation string given")
- doc = pydoc.getdoc(cls)
-
- NumpyDocString.__init__(self, doc)
-
- if config.get('show_class_members', True):
- if not self['Methods']:
- self['Methods'] = [(name, '', '')
- for name in sorted(self.methods)]
- if not self['Attributes']:
- self['Attributes'] = [(name, '', '')
- for name in sorted(self.properties)]
-
- @property
- def methods(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and callable(func)]
-
- @property
- def properties(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and func is None]
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/be66247e2a8a/
Changeset: be66247e2a8a
Branch: yt
User: ngoldbaum
Date: 2016-02-04 02:52:14+00:00
Summary: Merged in ngoldbaum/yt (pull request #1969)
Compatible with upcoming numpy 1.11 release. Closes #1162
Affected #: 4 files
diff -r f7c36e4a075f699037baa4fcee0b7450df85a8b4 -r be66247e2a8a1454ad2e7c29ef3473bf17e39b6e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -92,7 +92,7 @@
def _reshape_vals(self, arr):
nz = self.nz
if len(arr.shape) <= 2:
- n_oct = arr.shape[0] / (nz**3.0)
+ n_oct = arr.shape[0] / (nz**3)
else:
n_oct = max(arr.shape)
if arr.size == nz*nz*nz*n_oct:
diff -r f7c36e4a075f699037baa4fcee0b7450df85a8b4 -r be66247e2a8a1454ad2e7c29ef3473bf17e39b6e yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -262,7 +262,7 @@
data = {}
for field, offset, u in zip(fields, offsets, units):
if "position" in field:
- v = np.random.normal(npart, 0.5, 0.25)
+ v = np.random.normal(loc=0.5, scale=0.25, size=npart)
np.clip(v, 0.0, 1.0, v)
v = (np.random.random(npart) - offset)
data[field] = (v, u)
diff -r f7c36e4a075f699037baa4fcee0b7450df85a8b4 -r be66247e2a8a1454ad2e7c29ef3473bf17e39b6e yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -91,7 +91,7 @@
self.sigs = []
for dim in range(3):
d1 = (dim + 1) % 3
- d2 = (dim == 0)
+ d2 = int(dim == 0)
self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
@property
@@ -118,7 +118,7 @@
def find_by_zero_signature(self, dim):
sig = self.sigs[dim]
- grid_ends = np.zeros((sig.size, 2))
+ grid_ends = np.zeros((sig.size, 2), dtype='int64')
ng = 0
i = 0
while i < sig.size:
diff -r f7c36e4a075f699037baa4fcee0b7450df85a8b4 -r be66247e2a8a1454ad2e7c29ef3473bf17e39b6e yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -313,7 +313,7 @@
def _get_positions_vectors(self, camera, disparity):
- single_resolution_x = np.floor(camera.resolution[0]) / 2
+ single_resolution_x = int(np.floor(camera.resolution[0]) / 2)
east_vec = camera.unit_vectors[0]
north_vec = camera.unit_vectors[1]
@@ -681,7 +681,7 @@
if self.disparity is None:
self.disparity = camera.width[0] / 1000.
- single_resolution_x = np.floor(camera.resolution[0])/2
+ single_resolution_x = int(np.floor(camera.resolution[0]) / 2)
px = np.linspace(-np.pi, np.pi, single_resolution_x,
endpoint=True)[:, None]
py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this message because the commit-notification service is enabled for
the recipient of this email.
More information about the yt-svn
mailing list