[yt-svn] commit/yt: 15 new changesets
commits-noreply at bitbucket.org
Sat Jul 9 14:49:42 PDT 2016
15 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/1a36952224df/
Changeset: 1a36952224df
Branch: yt
User: qobilidop
Date: 2016-02-05 09:14:25+00:00
Summary: tweak index and gadget hdf5 frontend (which is also owls)
Affected #: 3 files
diff -r cd0a29c8ed02c58096e6e0f3f6c7aaa6b8cf6dc3 -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -74,6 +74,7 @@
additional_fields=(),
unit_base=None, n_ref=64,
over_refine_factor=1,
+ ptype="all",
bounding_box = None,
header_spec = "default",
field_spec = "default",
@@ -88,6 +89,7 @@
ptype_spec, gadget_ptype_specs)
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
+ self.ptype = ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -341,6 +343,7 @@
def __init__(self, filename, dataset_type="gadget_hdf5",
unit_base = None, n_ref=64,
over_refine_factor=1,
+ ptype="all",
bounding_box = None,
units_override=None):
self.storage_filename = None
@@ -351,6 +354,7 @@
super(GadgetHDF5Dataset, self).__init__(
filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
over_refine_factor=over_refine_factor,
+ ptype=ptype,
bounding_box = bounding_box)
def _get_hvals(self):
diff -r cd0a29c8ed02c58096e6e0f3f6c7aaa6b8cf6dc3 -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -123,13 +123,18 @@
f.close()
def _initialize_index(self, data_file, regions):
+ ptype = self.ds.ptype
f = _get_h5_handle(data_file.filename)
- pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+ if ptype == "all":
+ pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+ else:
+ pcount = f["/Header"].attrs["NumPart_ThisFile"][int(ptype[-1])]
morton = np.empty(pcount, dtype='uint64')
ind = 0
for key in f.keys():
if not key.startswith("PartType"): continue
if "Coordinates" not in f[key]: continue
+ if ptype != "all" and key != ptype: continue
ds = f[key]["Coordinates"]
dt = ds.dtype.newbyteorder("N") # Native
pos = np.empty(ds.shape, dtype=dt)
diff -r cd0a29c8ed02c58096e6e0f3f6c7aaa6b8cf6dc3 -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -60,14 +60,20 @@
cls = self.dataset._file_class
self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
for i in range(ndoms)]
- self.total_particles = sum(
- sum(d.total_particles.values()) for d in self.data_files)
+ ptype = self.dataset.ptype
+ if ptype == "all":
+ self.total_particles = sum(
+ sum(d.total_particles.values()) for d in self.data_files)
+ else:
+ self.total_particles = sum(
+ d.total_particles[ptype] for d in self.data_files)
ds = self.dataset
self.oct_handler = ParticleOctreeContainer(
[1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
over_refine = ds.over_refine_factor)
self.oct_handler.n_ref = ds.n_ref
- mylog.info("Allocating for %0.3e particles", self.total_particles)
+ mylog.info("Allocating for %0.3e particles (%s)",
+ self.total_particles, ptype)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
self.regions = ParticleRegions(
@@ -90,10 +96,14 @@
# * Broadcast back a serialized octree to join
#
# For now we will do this in serial.
+ ptype = self.dataset.ptype
morton = np.empty(self.total_particles, dtype="uint64")
ind = 0
for data_file in self.data_files:
- npart = sum(data_file.total_particles.values())
+ if ptype == "all":
+ npart = sum(data_file.total_particles.values())
+ else:
+ npart = data_file.total_particles[ptype]
morton[ind:ind + npart] = \
self.io._initialize_index(data_file, self.regions)
ind += npart
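Taken together, this changeset threads a new ptype keyword from the dataset
constructors down to octree index construction, so that only a single
particle type is indexed. A minimal usage sketch, assuming a hypothetical
Gadget HDF5 snapshot (ptype here is the keyword introduced above, defaulting
to "all"; this is an illustration, not a released yt API):

    import yt

    # Restrict the octree index to gas particles; other PartType
    # groups are skipped in _initialize_index.
    ds = yt.load("snapshot_000.hdf5", ptype="PartType0")
    ds.index  # building the index now allocates only PartType0 particles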
https://bitbucket.org/yt_analysis/yt/commits/cf61b281a719/
Changeset: cf61b281a719
Branch: yt
User: qobilidop
Date: 2016-02-05 10:20:41+00:00
Summary: merge with current head
Affected #: 231 files
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -28,40 +28,37 @@
yt/utilities/spatial/ckdtree.c
yt/utilities/lib/alt_ray_tracers.c
yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
yt/utilities/lib/bitarray.c
-yt/utilities/lib/CICDeposit.c
-yt/utilities/lib/ContourFinding.c
-yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
yt/utilities/lib/element_mappings.c
-yt/utilities/lib/FixedInterpolator.c
yt/utilities/lib/fortran_reader.c
yt/utilities/lib/freetype_writer.c
yt/utilities/lib/geometry_utils.c
yt/utilities/lib/image_utilities.c
-yt/utilities/lib/Interpolators.c
+yt/utilities/lib/interpolators.c
yt/utilities/lib/kdtree.c
yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
-yt/utilities/lib/Octree.c
-yt/utilities/lib/GridTree.c
+yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/pixelization_routines.c
yt/utilities/lib/png_writer.c
-yt/utilities/lib/PointsInVolume.c
-yt/utilities/lib/QuadTree.c
-yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/VolumeIntegrator.c
yt/utilities/lib/grid_traversal.c
yt/utilities/lib/marching_cubes.c
yt/utilities/lib/png_writer.h
yt/utilities/lib/write_array.c
-yt/utilities/lib/element_mappings.c
-yt/utilities/lib/mesh_construction.cpp
-yt/utilities/lib/mesh_samplers.cpp
-yt/utilities/lib/mesh_traversal.cpp
-yt/utilities/lib/mesh_intersection.cpp
syntax: glob
*.pyc
.*.swp
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
- """Insert an evaluated notebook into a document
-
- This uses runipy and nbconvert to transform a path to an unevaluated notebook
- into html suitable for embedding in a Sphinx document.
- """
- required_arguments = 1
- optional_arguments = 1
- option_spec = {'skip_exceptions': directives.flag}
- final_argument_whitespace = True
-
- def run(self): # check if there are spaces in the notebook name
- nb_path = self.arguments[0]
- if ' ' in nb_path: raise ValueError(
- "Due to issues with docutils stripping spaces from links, white "
- "space is not allowed in notebook filenames '{0}'".format(nb_path))
- # check if raw html is supported
- if not self.state.document.settings.raw_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
-
- cwd = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- os.chdir(tmpdir)
-
- # get path to notebook
- nb_filename = self.arguments[0]
- nb_basename = os.path.basename(nb_filename)
- rst_file = self.state_machine.document.attributes['source']
- rst_dir = os.path.abspath(os.path.dirname(rst_file))
- nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
- # Move files around.
- rel_dir = os.path.relpath(rst_dir, setup.confdir)
- dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
- dest_path = os.path.join(dest_dir, nb_basename)
-
- image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
- # Ensure desination build directory exists
- thread_safe_mkdir(os.path.dirname(dest_path))
-
- # Copy unevaluated notebook
- shutil.copyfile(nb_abs_path, dest_path)
-
- # Construct paths to versions getting copied over
- dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
- dest_path_script = string.replace(dest_path, '.ipynb', '.py')
- rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
- rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
- # Create python script vesion
- script_text = nb_to_python(nb_abs_path)
- f = open(dest_path_script, 'w')
- f.write(script_text.encode('utf8'))
- f.close()
-
- skip_exceptions = 'skip_exceptions' in self.options
-
- ret = evaluate_notebook(
- nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
- try:
- evaluated_text, resources = ret
- evaluated_text = write_notebook_output(
- resources, image_dir, image_rel_dir, evaluated_text)
- except ValueError:
- # This happens when a notebook raises an unhandled exception
- evaluated_text = ret
-
- # Create link to notebook and script files
- link_rst = "(" + \
- formatted_link(nb_basename) + "; " + \
- formatted_link(rel_path_eval) + "; " + \
- formatted_link(rel_path_script) + \
- ")"
-
- self.state_machine.insert_input([link_rst], rst_file)
-
- # create notebook node
- attributes = {'format': 'html', 'source': 'nb_path'}
- nb_node = notebook_node('', evaluated_text, **attributes)
- (nb_node.source, nb_node.line) = \
- self.state_machine.get_source_and_line(self.lineno)
-
- # add dependency
- self.state.document.settings.record_dependencies.add(nb_abs_path)
-
- # clean up
- os.chdir(cwd)
- shutil.rmtree(tmpdir, True)
-
- return [nb_node]
-
-
-class notebook_node(nodes.raw):
- pass
-
-def nb_to_python(nb_path):
- """convert notebook to python script"""
- exporter = python.PythonExporter()
- output, resources = exporter.from_filename(nb_path)
- return output
-
-def nb_to_html(nb_path):
- """convert notebook to html"""
- c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
- exporter = html.HTMLExporter(template_file='full', config=c)
- notebook = nbformat.read(open(nb_path), 'json')
- output, resources = exporter.from_notebook_node(notebook)
- header = output.split('<head>', 1)[1].split('</head>',1)[0]
- body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
- # http://imgur.com/eR9bMRH
- header = header.replace('<style', '<style scoped="scoped"')
- header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n',
- '')
- header = header.replace("code,pre{", "code{")
-
- # Filter out styles that conflict with the sphinx theme.
- filter_strings = [
- 'navbar',
- 'body{',
- 'alert{',
- 'uneditable-input{',
- 'collapse{',
- ]
-
- filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
- line_begin = [
- 'pre{',
- 'p{margin'
- ]
-
- filterfunc = lambda x: not any([s in x for s in filter_strings])
- header_lines = filter(filterfunc, header.split('\n'))
-
- filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
- header_lines = filter(filterfunc, header_lines)
-
- header = '\n'.join(header_lines)
-
- # concatenate raw html lines
- lines = ['<div class="ipynotebook">']
- lines.append(header)
- lines.append(body)
- lines.append('</div>')
- return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
- # Create evaluated version and save it to the dest path.
- notebook = nbformat.read(open(nb_path), 'json')
- nb_runner = NotebookRunner(notebook, pylab=False)
- try:
- nb_runner.run_notebook(skip_exceptions=skip_exceptions)
- except NotebookError as e:
- print('')
- print(e)
- # Return the traceback, filtering out ANSI color codes.
- # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
- return "Notebook conversion failed with the " \
- "following traceback: \n%s" % \
- re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
- str(e))
-
- if dest_path is None:
- dest_path = 'temp_evaluated.ipynb'
- nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
- ret = nb_to_html(dest_path)
- if dest_path is 'temp_evaluated.ipynb':
- os.remove(dest_path)
- return ret
-
-def formatted_link(path):
- return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
- self.visit_raw(node)
-
-def depart_notebook_node(self, node):
- self.depart_raw(node)
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_node(notebook_node,
- html=(visit_notebook_node, depart_notebook_node))
-
- app.add_directive('notebook', NotebookDirective)
-
- retdict = dict(
- version='0.1',
- parallel_read_safe=True,
- parallel_write_safe=True
- )
-
- return retdict
-
-def make_image_dir(setup, rst_dir):
- image_dir = setup.app.builder.outdir + os.path.sep + '_images'
- rel_dir = os.path.relpath(setup.confdir, rst_dir)
- image_rel_dir = rel_dir + os.path.sep + '_images'
- thread_safe_mkdir(image_dir)
- return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
- my_uuid = uuid.uuid4().hex
-
- for output in resources['outputs']:
- new_name = image_dir + os.path.sep + my_uuid + output
- new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
- evaluated_text = evaluated_text.replace(output, new_relative_name)
- with open(new_name, 'wb') as f:
- f.write(resources['outputs'][output])
- return evaluated_text
-
-def thread_safe_mkdir(dirname):
- try:
- os.makedirs(dirname)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- pass
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
- notebook_node, visit_notebook_node, depart_notebook_node, \
- evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
- """Insert an evaluated notebook cell into a document
-
- This uses runipy and nbconvert to transform an inline python
- script into html suitable for embedding in a Sphinx document.
- """
- required_arguments = 0
- optional_arguments = 1
- has_content = True
- option_spec = {'skip_exceptions': directives.flag}
-
- def run(self):
- # check if raw html is supported
- if not self.state.document.settings.raw_enabled:
- raise self.warning('"%s" directive disabled.' % self.name)
-
- cwd = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- os.chdir(tmpdir)
-
- rst_file = self.state_machine.document.attributes['source']
- rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
- image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
- # Construct notebook from cell content
- content = "\n".join(self.content)
- with open("temp.py", "w") as f:
- f.write(content)
-
- convert_to_ipynb('temp.py', 'temp.ipynb')
-
- skip_exceptions = 'skip_exceptions' in self.options
-
- evaluated_text, resources = evaluate_notebook(
- 'temp.ipynb', skip_exceptions=skip_exceptions)
-
- evaluated_text = write_notebook_output(
- resources, image_dir, image_rel_dir, evaluated_text)
-
- # create notebook node
- attributes = {'format': 'html', 'source': 'nb_path'}
- nb_node = notebook_node('', evaluated_text, **attributes)
- (nb_node.source, nb_node.line) = \
- self.state_machine.get_source_and_line(self.lineno)
-
- # clean up
- os.chdir(cwd)
- shutil.rmtree(tmpdir, True)
-
- return [nb_node]
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_node(notebook_node,
- html=(visit_notebook_node, depart_notebook_node))
-
- app.add_directive('notebook-cell', NotebookCellDirective)
-
- retdict = dict(
- version='0.1',
- parallel_read_safe=True,
- parallel_write_safe=True
- )
-
- return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
- with io.open(py_file, 'r', encoding='utf-8') as f:
- notebook = current.reads(f.read(), format='py')
- with io.open(ipynb_file, 'w', encoding='utf-8') as f:
- current.write(notebook, f, format='ipynb')
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
- """ A comment block.
- """
- is_comment = True
- def __init__(self, start_lineno, end_lineno, text):
- # int : The first line number in the block. 1-indexed.
- self.start_lineno = start_lineno
- # int : The last line number. Inclusive!
- self.end_lineno = end_lineno
- # str : The text block including '#' character but not any leading spaces.
- self.text = text
-
- def add(self, string, start, end, line):
- """ Add a new comment line.
- """
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
- self.text += string
-
- def __repr__(self):
- return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno, self.text)
-
-
-class NonComment(object):
- """ A non-comment block of code.
- """
- is_comment = False
- def __init__(self, start_lineno, end_lineno):
- self.start_lineno = start_lineno
- self.end_lineno = end_lineno
-
- def add(self, string, start, end, line):
- """ Add lines to the block.
- """
- if string.strip():
- # Only add if not entirely whitespace.
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno)
-
-
-class CommentBlocker(object):
- """ Pull out contiguous comment blocks.
- """
- def __init__(self):
- # Start with a dummy.
- self.current_block = NonComment(0, 0)
-
- # All of the blocks seen so far.
- self.blocks = []
-
- # The index mapping lines of code to their associated comment blocks.
- self.index = {}
-
- def process_file(self, file):
- """ Process a file object.
- """
- for token in tokenize.generate_tokens(file.next):
- self.process_token(*token)
- self.make_index()
-
- def process_token(self, kind, string, start, end, line):
- """ Process a single token.
- """
- if self.current_block.is_comment:
- if kind == tokenize.COMMENT:
- self.current_block.add(string, start, end, line)
- else:
- self.new_noncomment(start[0], end[0])
- else:
- if kind == tokenize.COMMENT:
- self.new_comment(string, start, end, line)
- else:
- self.current_block.add(string, start, end, line)
-
- def new_noncomment(self, start_lineno, end_lineno):
- """ We are transitioning from a noncomment to a comment.
- """
- block = NonComment(start_lineno, end_lineno)
- self.blocks.append(block)
- self.current_block = block
-
- def new_comment(self, string, start, end, line):
- """ Possibly add a new comment.
-
- Only adds a new comment if this comment is the only thing on the line.
- Otherwise, it extends the noncomment block.
- """
- prefix = line[:start[1]]
- if prefix.strip():
- # Oops! Trailing comment, not a comment block.
- self.current_block.add(string, start, end, line)
- else:
- # A comment block.
- block = Comment(start[0], end[0], string)
- self.blocks.append(block)
- self.current_block = block
-
- def make_index(self):
- """ Make the index mapping lines of actual code to their associated
- prefix comments.
- """
- for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
- if not block.is_comment:
- self.index[block.start_lineno] = prev
-
- def search_for_comment(self, lineno, default=None):
- """ Find the comment block just before the given line number.
-
- Returns None (or the specified default) if there is no such block.
- """
- if not self.index:
- self.make_index()
- block = self.index.get(lineno, None)
- text = getattr(block, 'text', default)
- return text
-
-
-def strip_comment_marker(text):
- """ Strip # markers at the front of a block of comment text.
- """
- lines = []
- for line in text.splitlines():
- lines.append(line.lstrip('#'))
- text = textwrap.dedent('\n'.join(lines))
- return text
-
-
-def get_class_traits(klass):
- """ Yield all of the documentation for trait definitions on a class object.
- """
- # FIXME: gracefully handle errors here or in the caller?
- source = inspect.getsource(klass)
- cb = CommentBlocker()
- cb.process_file(StringIO(source))
- mod_ast = compiler.parse(source)
- class_ast = mod_ast.node.nodes[0]
- for node in class_ast.code.nodes:
- # FIXME: handle other kinds of assignments?
- if isinstance(node, compiler.ast.Assign):
- name = node.nodes[0].name
- rhs = unparse(node.expr).strip()
- doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
- yield name, rhs, doc
-
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
- The unparse method takes a compiler.ast tree and transforms it back into
- valid python code. It is incomplete and currently only works for
- import statements, function calls, function definitions, assignments, and
- basic expressions.
-
- Inspired by python-2.5-svn/Demo/parser/unparse.py
-
- fixme: We may want to move to using _ast trees because the compiler for
- them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
- s = cStringIO.StringIO()
- UnparseCompilerAst(ast, s, single_line_functions)
- return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
- 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
- """ Methods in this class recursively traverse an AST and
- output source code for the abstract syntax; original formatting
- is disregarged.
- """
-
- #########################################################################
- # object interface.
- #########################################################################
-
- def __init__(self, tree, file = sys.stdout, single_line_functions=False):
- """ Unparser(tree, file=sys.stdout) -> None.
-
- Print the source for tree to file.
- """
- self.f = file
- self._single_func = single_line_functions
- self._do_indent = True
- self._indent = 0
- self._dispatch(tree)
- self._write("\n")
- self.f.flush()
-
- #########################################################################
- # Unparser private interface.
- #########################################################################
-
- ### format, output, and dispatch methods ################################
-
- def _fill(self, text = ""):
- "Indent a piece of text, according to the current indentation level"
- if self._do_indent:
- self._write("\n"+" "*self._indent + text)
- else:
- self._write(text)
-
- def _write(self, text):
- "Append a piece of text to the current line."
- self.f.write(text)
-
- def _enter(self):
- "Print ':', and increase the indentation."
- self._write(": ")
- self._indent += 1
-
- def _leave(self):
- "Decrease the indentation level."
- self._indent -= 1
-
- def _dispatch(self, tree):
- "_dispatcher function, _dispatching tree type T to method _T."
- if isinstance(tree, list):
- for t in tree:
- self._dispatch(t)
- return
- meth = getattr(self, "_"+tree.__class__.__name__)
- if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
- return
- meth(tree)
-
-
- #########################################################################
- # compiler.ast unparsing methods.
- #
- # There should be one method per concrete grammar type. They are
- # organized in alphabetical order.
- #########################################################################
-
- def _Add(self, t):
- self.__binary_op(t, '+')
-
- def _And(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") and (")
- self._write(")")
-
- def _AssAttr(self, t):
- """ Handle assigning an attribute of an object
- """
- self._dispatch(t.expr)
- self._write('.'+t.attrname)
-
- def _Assign(self, t):
- """ Expression Assignment such as "a = 1".
-
- This only handles assignment in expressions. Keyword assignment
- is handled separately.
- """
- self._fill()
- for target in t.nodes:
- self._dispatch(target)
- self._write(" = ")
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write('; ')
-
- def _AssName(self, t):
- """ Name on left hand side of expression.
-
- Treat just like a name on the right side of an expression.
- """
- self._Name(t)
-
- def _AssTuple(self, t):
- """ Tuple on left hand side of an expression.
- """
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- def _AugAssign(self, t):
- """ +=,-=,*=,/=,**=, etc. operations
- """
-
- self._fill()
- self._dispatch(t.node)
- self._write(' '+t.op+' ')
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write(';')
-
- def _Bitand(self, t):
- """ Bit and operation.
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" & ")
-
- def _Bitor(self, t):
- """ Bit or operation
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" | ")
-
- def _CallFunc(self, t):
- """ Function call.
- """
- self._dispatch(t.node)
- self._write("(")
- comma = False
- for e in t.args:
- if comma: self._write(", ")
- else: comma = True
- self._dispatch(e)
- if t.star_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("*")
- self._dispatch(t.star_args)
- if t.dstar_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("**")
- self._dispatch(t.dstar_args)
- self._write(")")
-
- def _Compare(self, t):
- self._dispatch(t.expr)
- for op, expr in t.ops:
- self._write(" " + op + " ")
- self._dispatch(expr)
-
- def _Const(self, t):
- """ A constant value such as an integer value, 3, or a string, "hello".
- """
- self._dispatch(t.value)
-
- def _Decorators(self, t):
- """ Handle function decorators (eg. @has_units)
- """
- for node in t.nodes:
- self._dispatch(node)
-
- def _Dict(self, t):
- self._write("{")
- for i, (k, v) in enumerate(t.items):
- self._dispatch(k)
- self._write(": ")
- self._dispatch(v)
- if i < len(t.items)-1:
- self._write(", ")
- self._write("}")
-
- def _Discard(self, t):
- """ Node for when return value is ignored such as in "foo(a)".
- """
- self._fill()
- self._dispatch(t.expr)
-
- def _Div(self, t):
- self.__binary_op(t, '/')
-
- def _Ellipsis(self, t):
- self._write("...")
-
- def _From(self, t):
- """ Handle "from xyz import foo, bar as baz".
- """
- # fixme: Are From and ImportFrom handled differently?
- self._fill("from ")
- self._write(t.modname)
- self._write(" import ")
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Function(self, t):
- """ Handle function definitions
- """
- if t.decorators is not None:
- self._fill("@")
- self._dispatch(t.decorators)
- self._fill("def "+t.name + "(")
- defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
- for i, arg in enumerate(zip(t.argnames, defaults)):
- self._write(arg[0])
- if arg[1] is not None:
- self._write('=')
- self._dispatch(arg[1])
- if i < len(t.argnames)-1:
- self._write(', ')
- self._write(")")
- if self._single_func:
- self._do_indent = False
- self._enter()
- self._dispatch(t.code)
- self._leave()
- self._do_indent = True
-
- def _Getattr(self, t):
- """ Handle getting an attribute of an object
- """
- if isinstance(t.expr, (Div, Mul, Sub, Add)):
- self._write('(')
- self._dispatch(t.expr)
- self._write(')')
- else:
- self._dispatch(t.expr)
-
- self._write('.'+t.attrname)
-
- def _If(self, t):
- self._fill()
-
- for i, (compare,code) in enumerate(t.tests):
- if i == 0:
- self._write("if ")
- else:
- self._write("elif ")
- self._dispatch(compare)
- self._enter()
- self._fill()
- self._dispatch(code)
- self._leave()
- self._write("\n")
-
- if t.else_ is not None:
- self._write("else")
- self._enter()
- self._fill()
- self._dispatch(t.else_)
- self._leave()
- self._write("\n")
-
- def _IfExp(self, t):
- self._dispatch(t.then)
- self._write(" if ")
- self._dispatch(t.test)
-
- if t.else_ is not None:
- self._write(" else (")
- self._dispatch(t.else_)
- self._write(")")
-
- def _Import(self, t):
- """ Handle "import xyz.foo".
- """
- self._fill("import ")
-
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Keyword(self, t):
- """ Keyword value assignment within function calls and definitions.
- """
- self._write(t.name)
- self._write("=")
- self._dispatch(t.expr)
-
- def _List(self, t):
- self._write("[")
- for i,node in enumerate(t.nodes):
- self._dispatch(node)
- if i < len(t.nodes)-1:
- self._write(", ")
- self._write("]")
-
- def _Module(self, t):
- if t.doc is not None:
- self._dispatch(t.doc)
- self._dispatch(t.node)
-
- def _Mul(self, t):
- self.__binary_op(t, '*')
-
- def _Name(self, t):
- self._write(t.name)
-
- def _NoneType(self, t):
- self._write("None")
-
- def _Not(self, t):
- self._write('not (')
- self._dispatch(t.expr)
- self._write(')')
-
- def _Or(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") or (")
- self._write(")")
-
- def _Pass(self, t):
- self._write("pass\n")
-
- def _Printnl(self, t):
- self._fill("print ")
- if t.dest:
- self._write(">> ")
- self._dispatch(t.dest)
- self._write(", ")
- comma = False
- for node in t.nodes:
- if comma: self._write(', ')
- else: comma = True
- self._dispatch(node)
-
- def _Power(self, t):
- self.__binary_op(t, '**')
-
- def _Return(self, t):
- self._fill("return ")
- if t.value:
- if isinstance(t.value, Tuple):
- text = ', '.join([ name.name for name in t.value.asList() ])
- self._write(text)
- else:
- self._dispatch(t.value)
- if not self._do_indent:
- self._write('; ')
-
- def _Slice(self, t):
- self._dispatch(t.expr)
- self._write("[")
- if t.lower:
- self._dispatch(t.lower)
- self._write(":")
- if t.upper:
- self._dispatch(t.upper)
- #if t.step:
- # self._write(":")
- # self._dispatch(t.step)
- self._write("]")
-
- def _Sliceobj(self, t):
- for i, node in enumerate(t.nodes):
- if i != 0:
- self._write(":")
- if not (isinstance(node, Const) and node.value is None):
- self._dispatch(node)
-
- def _Stmt(self, tree):
- for node in tree.nodes:
- self._dispatch(node)
-
- def _Sub(self, t):
- self.__binary_op(t, '-')
-
- def _Subscript(self, t):
- self._dispatch(t.expr)
- self._write("[")
- for i, value in enumerate(t.subs):
- if i != 0:
- self._write(",")
- self._dispatch(value)
- self._write("]")
-
- def _TryExcept(self, t):
- self._fill("try")
- self._enter()
- self._dispatch(t.body)
- self._leave()
-
- for handler in t.handlers:
- self._fill('except ')
- self._dispatch(handler[0])
- if handler[1] is not None:
- self._write(', ')
- self._dispatch(handler[1])
- self._enter()
- self._dispatch(handler[2])
- self._leave()
-
- if t.else_:
- self._fill("else")
- self._enter()
- self._dispatch(t.else_)
- self._leave()
-
- def _Tuple(self, t):
-
- if not t.nodes:
- # Empty tuple.
- self._write("()")
- else:
- self._write("(")
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- self._write(")")
-
- def _UnaryAdd(self, t):
- self._write("+")
- self._dispatch(t.expr)
-
- def _UnarySub(self, t):
- self._write("-")
- self._dispatch(t.expr)
-
- def _With(self, t):
- self._fill('with ')
- self._dispatch(t.expr)
- if t.vars:
- self._write(' as ')
- self._dispatch(t.vars.name)
- self._enter()
- self._dispatch(t.body)
- self._leave()
- self._write('\n')
-
- def _int(self, t):
- self._write(repr(t))
-
- def __binary_op(self, t, symbol):
- # Check if parenthesis are needed on left side and then dispatch
- has_paren = False
- left_class = str(t.left.__class__)
- if (left_class in op_precedence.keys() and
- op_precedence[left_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.left)
- if has_paren:
- self._write(')')
- # Write the appropriate symbol for operator
- self._write(symbol)
- # Check if parenthesis are needed on the right side and then dispatch
- has_paren = False
- right_class = str(t.right.__class__)
- if (right_class in op_precedence.keys() and
- op_precedence[right_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.right)
- if has_paren:
- self._write(')')
-
- def _float(self, t):
- # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
- # We prefer str here.
- self._write(str(t))
-
- def _str(self, t):
- self._write(repr(t))
-
- def _tuple(self, t):
- self._write(str(t))
-
- #########################################################################
- # These are the methods from the _ast modules unparse.
- #
- # As our needs to handle more advanced code increase, we may want to
- # modify some of the methods below so that they work for compiler.ast.
- #########################################################################
-
-# # stmt
-# def _Expr(self, tree):
-# self._fill()
-# self._dispatch(tree.value)
-#
-# def _Import(self, t):
-# self._fill("import ")
-# first = True
-# for a in t.names:
-# if first:
-# first = False
-# else:
-# self._write(", ")
-# self._write(a.name)
-# if a.asname:
-# self._write(" as "+a.asname)
-#
-## def _ImportFrom(self, t):
-## self._fill("from ")
-## self._write(t.module)
-## self._write(" import ")
-## for i, a in enumerate(t.names):
-## if i == 0:
-## self._write(", ")
-## self._write(a.name)
-## if a.asname:
-## self._write(" as "+a.asname)
-## # XXX(jpe) what is level for?
-##
-#
-# def _Break(self, t):
-# self._fill("break")
-#
-# def _Continue(self, t):
-# self._fill("continue")
-#
-# def _Delete(self, t):
-# self._fill("del ")
-# self._dispatch(t.targets)
-#
-# def _Assert(self, t):
-# self._fill("assert ")
-# self._dispatch(t.test)
-# if t.msg:
-# self._write(", ")
-# self._dispatch(t.msg)
-#
-# def _Exec(self, t):
-# self._fill("exec ")
-# self._dispatch(t.body)
-# if t.globals:
-# self._write(" in ")
-# self._dispatch(t.globals)
-# if t.locals:
-# self._write(", ")
-# self._dispatch(t.locals)
-#
-# def _Print(self, t):
-# self._fill("print ")
-# do_comma = False
-# if t.dest:
-# self._write(">>")
-# self._dispatch(t.dest)
-# do_comma = True
-# for e in t.values:
-# if do_comma:self._write(", ")
-# else:do_comma=True
-# self._dispatch(e)
-# if not t.nl:
-# self._write(",")
-#
-# def _Global(self, t):
-# self._fill("global")
-# for i, n in enumerate(t.names):
-# if i != 0:
-# self._write(",")
-# self._write(" " + n)
-#
-# def _Yield(self, t):
-# self._fill("yield")
-# if t.value:
-# self._write(" (")
-# self._dispatch(t.value)
-# self._write(")")
-#
-# def _Raise(self, t):
-# self._fill('raise ')
-# if t.type:
-# self._dispatch(t.type)
-# if t.inst:
-# self._write(", ")
-# self._dispatch(t.inst)
-# if t.tback:
-# self._write(", ")
-# self._dispatch(t.tback)
-#
-#
-# def _TryFinally(self, t):
-# self._fill("try")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# self._fill("finally")
-# self._enter()
-# self._dispatch(t.finalbody)
-# self._leave()
-#
-# def _excepthandler(self, t):
-# self._fill("except ")
-# if t.type:
-# self._dispatch(t.type)
-# if t.name:
-# self._write(", ")
-# self._dispatch(t.name)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _ClassDef(self, t):
-# self._write("\n")
-# self._fill("class "+t.name)
-# if t.bases:
-# self._write("(")
-# for a in t.bases:
-# self._dispatch(a)
-# self._write(", ")
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _FunctionDef(self, t):
-# self._write("\n")
-# for deco in t.decorators:
-# self._fill("@")
-# self._dispatch(deco)
-# self._fill("def "+t.name + "(")
-# self._dispatch(t.args)
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _For(self, t):
-# self._fill("for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# def _While(self, t):
-# self._fill("while ")
-# self._dispatch(t.test)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# # expr
-# def _Str(self, tree):
-# self._write(repr(tree.s))
-##
-# def _Repr(self, t):
-# self._write("`")
-# self._dispatch(t.value)
-# self._write("`")
-#
-# def _Num(self, t):
-# self._write(repr(t.n))
-#
-# def _ListComp(self, t):
-# self._write("[")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write("]")
-#
-# def _GeneratorExp(self, t):
-# self._write("(")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write(")")
-#
-# def _comprehension(self, t):
-# self._write(" for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# for if_clause in t.ifs:
-# self._write(" if ")
-# self._dispatch(if_clause)
-#
-# def _IfExp(self, t):
-# self._dispatch(t.body)
-# self._write(" if ")
-# self._dispatch(t.test)
-# if t.orelse:
-# self._write(" else ")
-# self._dispatch(t.orelse)
-#
-# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-# def _UnaryOp(self, t):
-# self._write(self.unop[t.op.__class__.__name__])
-# self._write("(")
-# self._dispatch(t.operand)
-# self._write(")")
-#
-# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-# "FloorDiv":"//", "Pow": "**"}
-# def _BinOp(self, t):
-# self._write("(")
-# self._dispatch(t.left)
-# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-# self._dispatch(t.right)
-# self._write(")")
-#
-# boolops = {_ast.And: 'and', _ast.Or: 'or'}
-# def _BoolOp(self, t):
-# self._write("(")
-# self._dispatch(t.values[0])
-# for v in t.values[1:]:
-# self._write(" %s " % self.boolops[t.op.__class__])
-# self._dispatch(v)
-# self._write(")")
-#
-# def _Attribute(self,t):
-# self._dispatch(t.value)
-# self._write(".")
-# self._write(t.attr)
-#
-## def _Call(self, t):
-## self._dispatch(t.func)
-## self._write("(")
-## comma = False
-## for e in t.args:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## for e in t.keywords:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## if t.starargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("*")
-## self._dispatch(t.starargs)
-## if t.kwargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("**")
-## self._dispatch(t.kwargs)
-## self._write(")")
-#
-# # slice
-# def _Index(self, t):
-# self._dispatch(t.value)
-#
-# def _ExtSlice(self, t):
-# for i, d in enumerate(t.dims):
-# if i != 0:
-# self._write(': ')
-# self._dispatch(d)
-#
-# # others
-# def _arguments(self, t):
-# first = True
-# nonDef = len(t.args)-len(t.defaults)
-# for a in t.args[0:nonDef]:
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a)
-# for a,d in zip(t.args[nonDef:], t.defaults):
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a),
-# self._write("=")
-# self._dispatch(d)
-# if t.vararg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("*"+t.vararg)
-# if t.kwarg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("**"+t.kwarg)
-#
-## def _keyword(self, t):
-## self._write(t.arg)
-## self._write("=")
-## self._dispatch(t.value)
-#
-# def _Lambda(self, t):
-# self._write("lambda ")
-# self._dispatch(t.args)
-# self._write(": ")
-# self._dispatch(t.body)
-
-
-
diff -r 1a36952224dfd1dbc02bcc245ea240da55ac5211 -r cf61b281a71926ba9b38596923387e135f250039 doc/extensions/numpydocmod/docscrape.py
--- a/doc/extensions/numpydocmod/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
- """A line-based string reader.
-
- """
- def __init__(self, data):
- """
- Parameters
- ----------
- data : str
- String with lines separated by '\n'.
-
- """
- if isinstance(data,list):
- self._str = data
- else:
- self._str = data.split('\n') # store string as list of lines
-
- self.reset()
-
- def __getitem__(self, n):
- return self._str[n]
-
- def reset(self):
- self._l = 0 # current line nr
-
- def read(self):
- if not self.eof():
- out = self[self._l]
- self._l += 1
- return out
- else:
- return ''
-
- def seek_next_non_empty_line(self):
- for l in self[self._l:]:
- if l.strip():
- break
- else:
- self._l += 1
-
- def eof(self):
- return self._l >= len(self._str)
-
- def read_to_condition(self, condition_func):
- start = self._l
- for line in self[start:]:
- if condition_func(line):
- return self[start:self._l]
- self._l += 1
- if self.eof():
- return self[start:self._l+1]
- return []
-
- def read_to_next_empty_line(self):
- self.seek_next_non_empty_line()
- def is_empty(line):
- return not line.strip()
- return self.read_to_condition(is_empty)
-
- def read_to_next_unindented_line(self):
- def is_unindented(line):
- return (line.strip() and (len(line.lstrip()) == len(line)))
- return self.read_to_condition(is_unindented)
-
- def peek(self,n=0):
- if self._l + n < len(self._str):
- return self[self._l + n]
- else:
- return ''
-
- def is_empty(self):
- return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
- def __init__(self, docstring, config={}):
- docstring = textwrap.dedent(docstring).split('\n')
-
- self._doc = Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': [''],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Attributes': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'Warnings': [],
- 'References': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def __getitem__(self,key):
- return self._parsed_data[key]
-
- def __setitem__(self,key,val):
- if not self._parsed_data.has_key(key):
- warn("Unknown section %s" % key)
- else:
- self._parsed_data[key] = val
-
- def _is_at_section(self):
- self._doc.seek_next_non_empty_line()
-
- if self._doc.eof():
- return False
-
- l1 = self._doc.peek().strip() # e.g. Parameters
-
- if l1.startswith('.. index::'):
- return True
-
- l2 = self._doc.peek(1).strip() # ---------- or ==========
- return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
- def _strip(self,doc):
- i = 0
- j = 0
- for i,line in enumerate(doc):
- if line.strip(): break
-
- for j,line in enumerate(doc[::-1]):
- if line.strip(): break
-
- return doc[i:len(doc)-j]
-
- def _read_to_next_section(self):
- section = self._doc.read_to_next_empty_line()
-
- while not self._is_at_section() and not self._doc.eof():
- if not self._doc.peek(-1).strip(): # previous line was empty
- section += ['']
-
- section += self._doc.read_to_next_empty_line()
-
- return section
-
- def _read_sections(self):
- while not self._doc.eof():
- data = self._read_to_next_section()
- name = data[0].strip()
-
- if name.startswith('..'): # index section
- yield name, data[1:]
- elif len(data) < 2:
- yield StopIteration
- else:
- yield name, self._strip(data[2:])
-
- def _parse_param_list(self,content):
- r = Reader(content)
- params = []
- while not r.eof():
- header = r.read().strip()
- if ' : ' in header:
- arg_name, arg_type = header.split(' : ')[:2]
- else:
- arg_name, arg_type = header, ''
-
- desc = r.read_to_next_unindented_line()
- desc = dedent_lines(desc)
-
- params.append((arg_name,arg_type,desc))
-
- return params
-
-
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
- def _parse_see_also(self, content):
- """
- func_name : Descriptive text
- continued text
- another_func_name : Descriptive text
- func_name1, func_name2, :meth:`func_name`, func_name3
-
- """
- items = []
-
- def parse_item_name(text):
- """Match ':role:`name`' or 'name'"""
- m = self._name_rgx.match(text)
- if m:
- g = m.groups()
- if g[1] is None:
- return g[3], None
- else:
- return g[2], g[1]
- raise ValueError("%s is not a item name" % text)
-
- def push_item(name, rest):
- if not name:
- return
- name, role = parse_item_name(name)
- items.append((name, list(rest), role))
- del rest[:]
-
- current_func = None
- rest = []
-
- for line in content:
- if not line.strip(): continue
-
- m = self._name_rgx.match(line)
- if m and line[m.end():].strip().startswith(':'):
- push_item(current_func, rest)
- current_func, line = line[:m.end()], line[m.end():]
- rest = [line.split(':', 1)[1].strip()]
- if not rest[0]:
- rest = []
- elif not line.startswith(' '):
- push_item(current_func, rest)
- current_func = None
- if ',' in line:
- for func in line.split(','):
- if func.strip():
- push_item(func, [])
- elif line.strip():
- current_func = line
- elif current_func is not None:
- rest.append(line.strip())
- push_item(current_func, rest)
- return items
-
- def _parse_index(self, section, content):
- """
- .. index: default
- :refguide: something, else, and more
-
- """
- def strip_each_in(lst):
- return [s.strip() for s in lst]
-
- out = {}
- section = section.split('::')
- if len(section) > 1:
- out['default'] = strip_each_in(section[1].split(','))[0]
- for line in content:
- line = line.split(':')
- if len(line) > 2:
- out[line[1]] = strip_each_in(line[2].split(','))
- return out
-
- def _parse_summary(self):
- """Grab signature (if given) and summary"""
- if self._is_at_section():
- return
-
- summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
- self['Signature'] = summary_str
- if not self._is_at_section():
- self['Summary'] = self._doc.read_to_next_empty_line()
- else:
- self['Summary'] = summary
-
- if not self._is_at_section():
- self['Extended Summary'] = self._read_to_next_section()
-
- def _parse(self):
- self._doc.reset()
- self._parse_summary()
-
- for (section,content) in self._read_sections():
- if not section.startswith('..'):
- section = ' '.join([s.capitalize() for s in section.split(' ')])
- if section in ('Parameters', 'Returns', 'Raises', 'Warns',
- 'Other Parameters', 'Attributes', 'Methods'):
- self[section] = self._parse_param_list(content)
- elif section.startswith('.. index::'):
- self['index'] = self._parse_index(section, content)
- elif section == 'See Also':
- self['See Also'] = self._parse_see_also(content)
- else:
- self[section] = content
-
- # string conversion routines
-
- def _str_header(self, name, symbol='-'):
- return [name, len(name)*symbol]
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- if self['Signature']:
- return [self['Signature'].replace('*','\*')] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- if self['Summary']:
- return self['Summary'] + ['']
- else:
- return []
-
- def _str_extended_summary(self):
- if self['Extended Summary']:
- return self['Extended Summary'] + ['']
- else:
- return []
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- for param,param_type,desc in self[name]:
- out += ['%s : %s' % (param, param_type)]
- out += self._str_indent(desc)
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += self[name]
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- if not self['See Also']: return []
- out = []
- out += self._str_header("See Also")
- last_had_desc = True
- for func, desc, role in self['See Also']:
- if role:
- link = ':%s:`%s`' % (role, func)
- elif func_role:
- link = ':%s:`%s`' % (func_role, func)
- else:
- link = "`%s`_" % func
- if desc or last_had_desc:
- out += ['']
- out += [link]
- else:
- out[-1] += ", %s" % link
- if desc:
- out += self._str_indent([' '.join(desc)])
- last_had_desc = True
- else:
- last_had_desc = False
- out += ['']
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.iteritems():
- if section == 'default':
- continue
- out += [' :%s: %s' % (section, ', '.join(references))]
- return out
-
- def __str__(self, func_role=''):
- out = []
- out += self._str_signature()
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_section('Warnings')
- out += self._str_see_also(func_role)
- for s in ('Notes','References','Examples'):
- out += self._str_section(s)
- for param_list in ('Attributes', 'Methods'):
- out += self._str_param_list(param_list)
- out += self._str_index()
- return '\n'.join(out)
-
-
-def indent(str,indent=4):
- indent_str = ' '*indent
- if str is None:
- return indent_str
- lines = str.split('\n')
- return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
- """Deindent a list of lines maximally"""
- return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
- return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
- def __init__(self, func, role='func', doc=None, config={}):
- self._f = func
- self._role = role # e.g. "func" or "meth"
-
- if doc is None:
- if func is None:
- raise ValueError("No function or docstring given")
- doc = inspect.getdoc(func) or ''
- NumpyDocString.__init__(self, doc)
-
- if not self['Signature'] and func is not None:
- func, func_name = self.get_func()
- try:
- # try to read signature
- argspec = inspect.getargspec(func)
- argspec = inspect.formatargspec(*argspec)
- argspec = argspec.replace('*','\*')
- signature = '%s%s' % (func_name, argspec)
- except TypeError, e:
- signature = '%s()' % func_name
- self['Signature'] = signature
-
- def get_func(self):
- func_name = getattr(self._f, '__name__', self.__class__.__name__)
- if inspect.isclass(self._f):
- func = getattr(self._f, '__call__', self._f.__init__)
- else:
- func = self._f
- return func, func_name
-
- def __str__(self):
- out = ''
-
- func, func_name = self.get_func()
- signature = self['Signature'].replace('*', '\*')
-
- roles = {'func': 'function',
- 'meth': 'method'}
-
- if self._role:
- if not roles.has_key(self._role):
- print("Warning: invalid role %s" % self._role)
- out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
- func_name)
-
- out += super(FunctionDoc, self).__str__(func_role=self._role)
- return out
-
-
-class ClassDoc(NumpyDocString):
- def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
- config={}):
- if not inspect.isclass(cls) and cls is not None:
- raise ValueError("Expected a class or None, but got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
-
- if doc is None:
- if cls is None:
- raise ValueError("No class or documentation string given")
- doc = pydoc.getdoc(cls)
-
- NumpyDocString.__init__(self, doc)
-
- if config.get('show_class_members', True):
- if not self['Methods']:
- self['Methods'] = [(name, '', '')
- for name in sorted(self.methods)]
- if not self['Attributes']:
- self['Attributes'] = [(name, '', '')
- for name in sorted(self.properties)]
-
- @property
- def methods(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and callable(func)]
-
- @property
- def properties(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and func is None]
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/cfc3fbe4e469/
Changeset: cfc3fbe4e469
Branch: yt
User: qobilidop
Date: 2016-02-05 21:16:49+00:00
Summary: change log message
Affected #: 1 file
diff -r cf61b281a71926ba9b38596923387e135f250039 -r cfc3fbe4e469d45a0dab2b261ada062f3f31c111 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -73,7 +73,8 @@
[1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
over_refine = ds.over_refine_factor)
self.oct_handler.n_ref = ds.n_ref
- only_on_root(mylog.info, "Allocating for %0.3e particles (%s)",
+ only_on_root(mylog.info, "Allocating for %0.3e particles "
+ "(particle type '%s')",
self.total_particles, ptype)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
https://bitbucket.org/yt_analysis/yt/commits/dbcdfc050329/
Changeset: dbcdfc050329
Branch: yt
User: qobilidop
Date: 2016-02-05 22:51:59+00:00
Summary: make the change backward compatible
Affected #: 2 files
diff -r cfc3fbe4e469d45a0dab2b261ada062f3f31c111 -r dbcdfc050329ebaa6b03b6b8bda6d9b0bbdac365 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -123,7 +123,7 @@
f.close()
def _initialize_index(self, data_file, regions):
- ptype = self.ds.ptype
+ ptype = self.ptype
f = _get_h5_handle(data_file.filename)
if ptype == "all":
pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
diff -r cfc3fbe4e469d45a0dab2b261ada062f3f31c111 -r dbcdfc050329ebaa6b03b6b8bda6d9b0bbdac365 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -37,6 +37,13 @@
self.float_type = np.float64
super(ParticleIndex, self).__init__(ds, dataset_type)
+ @property
+ def ptype(self):
+ if hasattr(self.dataset, "ptype"):
+ return self.dataset.ptype
+ else:
+ return "all"
+
def _setup_geometry(self):
mylog.debug("Initializing Particle Geometry Handler.")
self._initialize_particle_handler()
@@ -61,7 +68,7 @@
cls = self.dataset._file_class
self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
for i in range(ndoms)]
- ptype = self.dataset.ptype
+ ptype = self.ptype
if ptype == "all":
self.total_particles = sum(
sum(d.total_particles.values()) for d in self.data_files)
@@ -98,7 +105,10 @@
# * Broadcast back a serialized octree to join
#
# For now we will do this in serial.
- ptype = self.dataset.ptype
+ ptype = self.ptype
+ # Set the ptype attribute of self.io dynamically here, so we don't
+ # need to assume that the dataset has the attribute.
+ self.io.ptype = ptype
morton = np.empty(self.total_particles, dtype="uint64")
ind = 0
for data_file in self.data_files:
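The new ptype property makes the feature opt-in: any dataset that never
defines a ptype attribute falls back to "all" and indexes every particle
type, preserving the old behavior. The same fallback could be written more
compactly with getattr (an illustrative one-liner, not part of the diff):

    # Equivalent fallback: use the dataset's ptype if present, else "all".
    def index_ptype(dataset):
        return getattr(dataset, "ptype", "all")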
https://bitbucket.org/yt_analysis/yt/commits/44f840f01b92/
Changeset: 44f840f01b92
Branch: yt
User: qobilidop
Date: 2016-02-10 04:09:13+00:00
Summary: refactor the logic
Affected #: 1 file
diff -r dbcdfc050329ebaa6b03b6b8bda6d9b0bbdac365 -r 44f840f01b92611e219b0446dc175de6725f1f50 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -127,14 +127,15 @@
f = _get_h5_handle(data_file.filename)
if ptype == "all":
pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+ keys = f.keys()
else:
pcount = f["/Header"].attrs["NumPart_ThisFile"][int(ptype[-1])]
+ keys = [ptype]
morton = np.empty(pcount, dtype='uint64')
ind = 0
- for key in f.keys():
+ for key in keys:
if not key.startswith("PartType"): continue
if "Coordinates" not in f[key]: continue
- if ptype != "all" and key != ptype: continue
ds = f[key]["Coordinates"]
dt = ds.dtype.newbyteorder("N") # Native
pos = np.empty(ds.shape, dtype=dt)
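The refactor hoists the particle-type filter out of the loop: which HDF5 groups to visit is decided once, up front, instead of per key. A rough standalone sketch of the selection logic (h5py-style access, assuming Gadget/OWLS group names PartType0, PartType1, ...):

def count_and_keys(f, ptype="all"):
    # f is an open h5py.File; "NumPart_ThisFile" holds the per-type
    # particle counts in the Gadget/OWLS header.
    counts = f["/Header"].attrs["NumPart_ThisFile"]
    if ptype == "all":
        return counts[:].sum(), list(f.keys())
    # "PartType2"[-1] -> "2", the index into the counts array
    return counts[int(ptype[-1])], [ptype]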
https://bitbucket.org/yt_analysis/yt/commits/0f8594985133/
Changeset: 0f8594985133
Branch: yt
User: qobilidop
Date: 2016-02-22 20:35:47+00:00
Summary: enable ptype oct for StreamParticlesDataset
Affected #: 2 files
diff -r 44f840f01b92611e219b0446dc175de6725f1f50 -r 0f8594985133be6e157bde32b5980ca2a8cba13f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1010,12 +1010,14 @@
filename_template = "stream_file"
n_ref = 64
over_refine_factor = 1
+ ptype = "all"
def load_particles(data, length_unit = None, bbox=None,
sim_time=0.0, mass_unit = None, time_unit = None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
- n_ref = 64, over_refine_factor = 1, geometry = "cartesian"):
+ n_ref = 64, over_refine_factor = 1, ptype = "all",
+ geometry = "cartesian"):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -1137,6 +1139,7 @@
sds = StreamParticlesDataset(handler, geometry=geometry)
sds.n_ref = n_ref
sds.over_refine_factor = over_refine_factor
+ sds.ptype = ptype
return sds
diff -r 44f840f01b92611e219b0446dc175de6725f1f50 -r 0f8594985133be6e157bde32b5980ca2a8cba13f yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -167,8 +167,13 @@
def _initialize_index(self, data_file, regions):
# self.fields[g.id][fname] is the pattern here
+ index_ptype = self.ptype
+ if index_ptype == "all":
+ ptypes = self.ds.particle_types_raw
+ else:
+ ptypes = [index_ptype]
morton = []
- for ptype in self.ds.particle_types_raw:
+ for ptype in ptypes:
try:
pos = np.column_stack(self.fields[data_file.filename][
(ptype, "particle_position_%s" % ax)] for ax in 'xyz')
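A hypothetical call exercising the new keyword, with made-up in-memory data (note that later commits in this series rename the keyword to index_ptype and then remove this stream-frontend hook again):

import numpy as np
import yt

n = 1000
data = {}
for pt in ("PartType0", "PartType1"):
    for ax in "xyz":
        data[(pt, "particle_position_%s" % ax)] = np.random.random(n)

# Index only PartType0; PartType1 is still loaded and selectable,
# it just does not drive the octree construction.
ds = yt.load_particles(data, ptype="PartType0")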
https://bitbucket.org/yt_analysis/yt/commits/22fe8c96f2e7/
Changeset: 22fe8c96f2e7
Branch: yt
User: qobilidop
Date: 2016-06-08 22:03:24+00:00
Summary: Merge in the development tip.
Affected #: 441 files
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -1,6 +1,6 @@
stephenskory at yahoo.com = s at skory.us
"Stephen Skory stephenskory at yahoo.com" = s at skory.us
-yuan at astro.columbia.edu = bear0980 at gmail.com
+bear0980 at gmail.com = yuan at astro.columbia.edu
juxtaposicion at gmail.com = cemoody at ucsc.edu
chummels at gmail.com = chummels at astro.columbia.edu
jwise at astro.princeton.edu = jwise at physics.gatech.edu
@@ -19,7 +19,6 @@
sername=kayleanelson = kaylea.nelson at yale.edu
kayleanelson = kaylea.nelson at yale.edu
jcforbes at ucsc.edu = jforbes at ucolick.org
-ngoldbau at ucsc.edu = goldbaum at ucolick.org
biondo at wisc.edu = Biondo at wisc.edu
samgeen at googlemail.com = samgeen at gmail.com
fbogert = fbogert at ucsc.edu
@@ -39,4 +38,12 @@
jnaiman at ucolick.org = jnaiman
migueld.deval = miguel at archlinux.net
slevy at ncsa.illinois.edu = salevy at illinois.edu
-malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file
+malzraa at gmail.com = kellerbw at mcmaster.ca
+None = convert-repo
+dfenn = df11c at my.fsu.edu
+langmm = langmm.astro at gmail.com
+jmt354 = jmtomlinson95 at gmail.com
+desika = dnarayan at haverford.edu
+Ben Thompson = bthompson2090 at gmail.com
+goldbaum at ucolick.org = ngoldbau at illinois.edu
+ngoldbau at ucsc.edu = ngoldbau at illinois.edu
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/basic_octree.c
yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
yt/utilities/lib/contour_finding.c
yt/utilities/lib/depth_first_octree.c
yt/utilities/lib/element_mappings.c
@@ -44,9 +45,11 @@
yt/utilities/lib/mesh_intersection.cpp
yt/utilities/lib/mesh_samplers.cpp
yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_triangulation.c
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/primitives.c
yt/utilities/lib/origami.c
yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/pixelization_routines.c
@@ -61,6 +64,7 @@
yt/utilities/lib/write_array.c
syntax: glob
*.pyc
+*.pyd
.*.swp
*.so
.idea/*
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -795,8 +795,8 @@
rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
rather than ``SpecialGrid.__init__()``.
* Docstrings should describe input, output, behavior, and any state changes
- that occur on an object. See the file ``doc/docstring_example.txt`` for a
- fiducial example of a docstring.
+ that occur on an object. See :ref:`docstrings` below for a fiducial example
+ of a docstring.
* Use only one top-level import per line. Unless there is a good reason not to,
imports should happen at the top of the file, after the copyright blurb.
* Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
@@ -843,7 +843,7 @@
be avoided, they must be explained, even if they are only to be passed on to
a nested function.
-.. _docstrings
+.. _docstrings:
Docstrings
----------
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -10,6 +10,7 @@
Alex Bogert (fbogert at ucsc.edu)
André-Patrick Bubel (code at andre-bubel.de)
Pengfei Chen (madcpf at gmail.com)
+ Yi-Hao Chen (yihaochentw at gmail.com)
David Collins (dcollins4096 at gmail.com)
Brian Crosby (crosby.bd at gmail.com)
Andrew Cunningham (ajcunn at gmail.com)
@@ -25,10 +26,12 @@
William Gray (graywilliamj at gmail.com)
Markus Haider (markus.haider at uibk.ac.at)
Eric Hallman (hallman13 at gmail.com)
+ David Hannasch (David.A.Hannasch at gmail.com)
Cameron Hummels (chummels at gmail.com)
Anni Järvenpää (anni.jarvenpaa at gmail.com)
Allyson Julian (astrohckr at gmail.com)
Christian Karch (chiffre at posteo.de)
+ Maximilian Katz (maximilian.katz at stonybrook.edu)
Ben W. Keller (kellerbw at mcmaster.ca)
Ji-hoon Kim (me at jihoonkim.org)
Steffen Klemer (sklemer at phys.uni-goettingen.de)
@@ -60,6 +63,7 @@
Anna Rosen (rosen at ucolick.org)
Chuck Rozhon (rozhon2 at illinois.edu)
Douglas Rudd (drudd at uchicago.edu)
+ Hsi-Yu Schive (hyschive at gmail.com)
Anthony Scopatz (scopatz at gmail.com)
Noel Scudder (noel.scudder at stonybrook.edu)
Pat Shriwise (shriwise at wisc.edu)
@@ -75,6 +79,7 @@
Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
Benjamin Thompson (bthompson2090 at gmail.com)
Robert Thompson (rthompsonj at gmail.com)
+ Joseph Tomlinson (jmtomlinson95 at gmail.com)
Stephanie Tonnesen (stonnes at gmail.com)
Matthew Turk (matthewturk at gmail.com)
Rich Wagner (rwagner at physics.ucsd.edu)
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
include yt/visualization/mapserver/html/map_index.html
include yt/visualization/mapserver/html/leaflet/*.css
include yt/visualization/mapserver/html/leaflet/*.js
@@ -12,4 +12,5 @@
prune doc/source/reference/api/generated
prune doc/build
recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
prune yt/frontends/_skeleton
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 appveyor.yml
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,38 @@
+# AppVeyor.com is a Continuous Integration service to build and run tests under
+# Windows
+
+environment:
+
+ global:
+ PYTHON: "C:\\Miniconda-x64"
+
+ matrix:
+
+ - PYTHON_VERSION: "2.7"
+
+ - PYTHON_VERSION: "3.5"
+
+
+platform:
+ -x64
+
+install:
+ - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
+
+ # Install the build and runtime dependencies of the project.
+ # Create a conda environment
+ - "conda create -q --yes -n test python=%PYTHON_VERSION%"
+ - "activate test"
+
+ # Check that we have the expected version of Python
+ - "python --version"
+
+ # Install specified version of numpy and dependencies
+ - "conda install -q --yes numpy nose setuptools ipython Cython sympy h5py matplotlib"
+ - "python setup.py develop"
+
+# Not a .NET project
+build: false
+
+test_script:
+ - "nosetests -e test_all_fields ."
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 clean.sh
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,1 @@
-find . -name "*.so" -exec rm -v {} \;
-find . -name "*.pyc" -exec rm -v {} \;
-find . -name "__config__.py" -exec rm -v {} \;
-rm -rvf build dist
+hg --config extensions.purge= purge --all yt
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -7,12 +7,12 @@
% To make this come out properly in landscape mode, do one of the following
% 1.
-% pdflatex latexsheet.tex
+% pdflatex cheatsheet.tex
%
% 2.
-% latex latexsheet.tex
-% dvips -P pdf -t landscape latexsheet.dvi
-% ps2pdf latexsheet.ps
+% latex cheatsheet.tex
+% dvips -P pdf -t landscape cheatsheet.dvi
+% ps2pdf cheatsheet.ps
% If you're reading this, be prepared for confusion. Making this was
@@ -45,7 +45,7 @@
% Turn off header and footer
\pagestyle{empty}
-
+
% Redefine section commands to use less space
\makeatletter
@@ -117,26 +117,26 @@
including a list of the available flags.
\texttt{iyt}\textemdash\ Load yt and IPython. \\
-\texttt{yt load} {\it dataset} \textemdash\ Load a single dataset. \\
+\texttt{yt load} \textit{dataset} \textemdash\ Load a single dataset. \\
\texttt{yt help} \textemdash\ Print yt help information. \\
-\texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
+\texttt{yt stats} \textit{dataset} \textemdash\ Print stats of a dataset. \\
\texttt{yt update} \textemdash\ Update yt to most recent version.\\
\texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
\texttt{yt version} \textemdash\ yt installation information. \\
\texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
-\texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
-\texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
-\texttt{yt render} {\it dataset} \textemdash\ Create a simple
+\texttt{yt upload\_image} \textit{image.png} \textemdash\ Upload PNG image to imgur.com. \\
+\texttt{yt upload\_notebook} \textit{notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
+\texttt{yt plot} \textit{dataset} \textemdash\ Create a set of images.\\
+\texttt{yt render} \textit{dataset} \textemdash\ Create a simple
volume rendering. \\
-\texttt{yt mapserver} {\it dataset} \textemdash\ View a plot/projection in a Gmaps-like
+\texttt{yt mapserver} \textit{dataset} \textemdash\ View a plot/projection in a Gmaps-like
interface. \\
-\texttt{yt pastebin} {\it text.out} \textemdash\ Post text to the pastebin at
- paste.yt-project.org. \\
-\texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
+\texttt{yt pastebin} \textit{text.out} \textemdash\ Post text to the pastebin at
+ paste.yt-project.org. \\
+\texttt{yt pastebin\_grab} \textit{identifier} \textemdash\ Print content of pastebin to
STDOUT. \\
\texttt{yt bugreport} \textemdash\ Report a yt bug. \\
-\texttt{yt hop} {\it dataset} \textemdash\ Run hop on a dataset. \\
+\texttt{yt hop} \textit{dataset} \textemdash\ Run hop on a dataset. \\
\subsection{yt Imports}
In order to use yt, Python must load the relevant yt modules into memory.
@@ -144,15 +144,15 @@
used as part of a script.
\newlength{\MyLen}
\settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{import yt} \textemdash\
+\texttt{import yt} \textemdash\
Load yt. \\
-\texttt{from yt.config import ytcfg} \textemdash\
+\texttt{from yt.config import ytcfg} \textemdash\
Used to set yt configuration options.
If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered} \textemdash\
+\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered} \textemdash\
Load halo finding modules. Other modules
-are loaded in a similar way by swapping the
-{\em emphasized} text.
+are loaded in a similar way by swapping the
+\emph{emphasized} text.
See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
\subsection{YTArray}
@@ -163,32 +163,32 @@
very brief list of some useful ones.
\settowidth{\MyLen}{\texttt{multicol} }\\
\texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
-\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\
\texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
min value of \texttt{a}.\\
-\texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{v = a[}\textit{index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location \textit{index}.\\
+\texttt{b = a[}\textit{i:j}\texttt{]} \textemdash\ Select the slice of values from
\texttt{a} between
-locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
+locations \textit{i} to \textit{j-1} saved to a new Numpy array \texttt{b} with length \textit{j-i}. \\
\texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
\texttt{sel}, of the same shape as \texttt{a},
that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
elements from \texttt{a} that correspond to elements of \texttt{sel}
-that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
+that are \textit{True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
+\texttt{a.write\_hdf5(\textit{filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file \textit{filename.h5}.\\
\subsection{IPython Tips}
\settowidth{\MyLen}{\texttt{multicol} }
These tips work if IPython has been loaded, typically either by invoking
\texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}).
\texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a
-variable or function name when the \texttt{Tab} key is pressed, e.g. {\it HaloFi}\textendash\texttt{Tab} would auto-complete
-to {\it HaloFinder}. This also works with imports, e.g. {\it from numpy.random.}\textendash\texttt{Tab}
+variable or function name when the \texttt{Tab} key is pressed, e.g. \textit{HaloFi}\textendash\texttt{Tab} would auto-complete
+to \textit{HaloFinder}. This also works with imports, e.g. \textit{from numpy.random.}\textendash\texttt{Tab}
would give you a list of random functions (note the trailing period before hitting \texttt{Tab}).\\
\texttt{?, ??} \textemdash\ Appending one or two question marks at the end of any object gives you
-detailed information about it, e.g. {\it variable\_name}?.\\
+detailed information about it, e.g. \textit{variable\_name}?.\\
Below a few IPython ``magics'' are listed, which are IPython-specific shortcut commands.\\
\texttt{\%paste} \textemdash\ Paste content from the system clipboard into the IPython shell.\\
\texttt{\%hist} \textemdash\ Print recent command history.\\
@@ -204,40 +204,40 @@
\subsection{Load and Access Data}
The first step in using yt is to reference a simulation snapshot.
-After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
+After that, simulation data is generally accessed in yt using \textit{Data Containers} which are Python objects
that define a region of simulation space from which data should be selected.
\settowidth{\MyLen}{\texttt{multicol} }
-\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\ Reference a single snapshot.\\
+\texttt{ds = yt.load(}\textit{dataset}\texttt{)} \textemdash\ Reference a single snapshot.\\
\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+\texttt{a = dd[}\textit{field\_name}\texttt{]} \textemdash\ Copies the contents of \textit{field} into the
YTArray \texttt{a}. Similarly for other data containers.\\
\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
in the snapshot. \\
\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\ Create a spherical data
-container. {\it cen} may be a coordinate, or ``max'' which
-centers on the max density point. {\it radius} may be a float in
-code units or a tuple of ({\it length, unit}).\\
+\texttt{sp = ds.sphere(}\textit{cen}\texttt{,}\textit{radius}\texttt{)} \textemdash\ Create a spherical data
+container. \textit{cen} may be a coordinate, or ``max'' which
+centers on the max density point. \textit{radius} may be a float in
+code units or a tuple of (\textit{length, unit}).\\
-\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
-rectilinear data container. {\it cen} is required but not used.
-{\it left} and {\it right edge} are coordinate values that define the region.
+\texttt{re = ds.region(\textit{cen}, \textit{left edge}, \textit{right edge})} \textemdash\ Create a
+rectilinear data container. \textit{cen} is required but not used.
+\textit{left} and \textit{right edge} are coordinate values that define the region.
-\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\
-Create a cylindrical data container centered at {\it cen} along the
-direction set by {\it normal},with total length
- 2$\times${\it height} and with radius {\it radius}. \\
-
-\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{di = ds.disk(\textit{cen}, \textit{normal}, \textit{radius}, \textit{height})} \textemdash\
+Create a cylindrical data container centered at \textit{cen} along the
+direction set by \textit{normal},with total length
+ 2$\times$\textit{height} and with radius \textit{radius}. \\
+
+\texttt{ds.save\_object(sp, \textit{``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object(\textit{``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
\subsection{Defining New Fields}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory.
+\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory.
Field can either be created before a dataset is loaded using \texttt{add\_field}:
-\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{def \_metal\_mass(\textit{field},\textit{data})}\\
\texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
\texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
Or added to an existing dataset using \texttt{ds.add\_field}:
@@ -245,34 +245,34 @@
\subsection{Slices and Projections}
\settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
-\texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
+\texttt{slc = yt.SlicePlot(ds, \textit{axis or normal vector}, \textit{field}, \textit{center=}, \textit{width=}, \textit{weight\_field=}, \textit{additional parameters})} \textemdash\ Make a slice plot
+perpendicular to \textit{axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of \textit{field} weighted by \textit{weight\_field} at (code-units) \textit{center} with
+\textit{width} in code units or a (value, unit) tuple. Hint: try \textit{yt.SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
\texttt{.save()} works similarly for the commands below.\\
-\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
\subsection{Plot Annotations}
\settowidth{\MyLen}{\texttt{multicol} }
-Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a {\it plot\_args} dict that is fed to matplotlib annotator. \\
-\texttt{velocity({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
-\texttt{magnetic\_field({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
-\texttt{quiver({\it field\_x},{\it field\_y},{\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \\
-\texttt{contour({\it field=},{\it ncont=},{\it factor=},{\it clim=},{\it take\_log=}, {\it additional parameters})} \textemdash Plots a number of contours {\it ncont} to interpolate {\it field} optionally using {\it take\_log}, upper and lower {\it c}ontour{\it lim}its and {\it factor} number of points in the interpolation.\\
-\texttt{grids({\it alpha=}, {\it draw\_ids=}, {\it periodic=}, {\it min\_level=}, {\it max\_level=})} \textemdash Add grid boundaries. \\
-\texttt{streamlines({\it field\_x},{\it field\_y},{\it factor=},{\it density=})}\\
-\texttt{clumps({\it clumplist})} \textemdash\ Generate {\it clumplist} using the clump finder and plot. \\
-\texttt{arrow({\it pos}, {\it code\_size})} Add an arrow at a {\it pos}ition. \\
-\texttt{point({\it pos}, {\it text})} \textemdash\ Add text at a {\it pos}ition. \\
-\texttt{marker({\it pos}, {\it marker=})} \textemdash\ Add a matplotlib-defined marker at a {\it pos}ition. \\
-\texttt{sphere({\it center}, {\it radius}, {\it text=})} \textemdash\ Draw a circle and append {\it text}.\\
-\texttt{hop\_circles({\it hop\_output}, {\it max\_number=}, {\it annotate=}, {\it min\_size=}, {\it max\_size=}, {\it font\_size=}, {\it print\_halo\_size=}, {\it fixed\_radius=}, {\it min\_mass=}, {\it print\_halo\_mass=}, {\it width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles ({\it size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
-\texttt{hop\_particles({\it hop\_output},{\it max\_number=},{\it p\_size=},\\
-{\it min\_size},{\it alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
-\texttt{particles({\it width},{\it p\_size=},{\it col=}, {\it marker=}, {\it stride=}, {\it ptype=}, {\it stars\_only=}, {\it dm\_only=}, {\it minimum\_mass=}, {\it alpha=})} \textemdash\ Draw particles of {\it p\_size} pixels in a slab of {\it width} with {\it col}or using a matplotlib {\it marker} plotting only every {\it stride} number of particles.\\
-\texttt{title({\it text})}\\
+Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a \textit{plot\_args} dict that is fed to matplotlib annotator. \\
+\texttt{velocity(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
+\texttt{magnetic\_field(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
+\texttt{quiver(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \\
+\texttt{contour(\textit{field=},\textit{ncont=},\textit{factor=},\textit{clim=},\textit{take\_log=}, \textit{additional parameters})} \textemdash Plots a number of contours \textit{ncont} to interpolate \textit{field} optionally using \textit{take\_log}, upper and lower \textit{c}ontour\textit{lim}its and \textit{factor} number of points in the interpolation.\\
+\texttt{grids(\textit{alpha=}, \textit{draw\_ids=}, \textit{periodic=}, \textit{min\_level=}, \textit{max\_level=})} \textemdash Add grid boundaries. \\
+\texttt{streamlines(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{density=})}\\
+\texttt{clumps(\textit{clumplist})} \textemdash\ Generate \textit{clumplist} using the clump finder and plot. \\
+\texttt{arrow(\textit{pos}, \textit{code\_size})} Add an arrow at a \textit{pos}ition. \\
+\texttt{point(\textit{pos}, \textit{text})} \textemdash\ Add text at a \textit{pos}ition. \\
+\texttt{marker(\textit{pos}, \textit{marker=})} \textemdash\ Add a matplotlib-defined marker at a \textit{pos}ition. \\
+\texttt{sphere(\textit{center}, \textit{radius}, \textit{text=})} \textemdash\ Draw a circle and append \textit{text}.\\
+\texttt{hop\_circles(\textit{hop\_output}, \textit{max\_number=}, \textit{annotate=}, \textit{min\_size=}, \textit{max\_size=}, \textit{font\_size=}, \textit{print\_halo\_size=}, \textit{fixed\_radius=}, \textit{min\_mass=}, \textit{print\_halo\_mass=}, \textit{width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles (\textit{size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
+\texttt{hop\_particles(\textit{hop\_output},\textit{max\_number=},\textit{p\_size=},\\
+\textit{min\_size},\textit{alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
+\texttt{particles(\textit{width},\textit{p\_size=},\textit{col=}, \textit{marker=}, \textit{stride=}, \textit{ptype=}, \textit{stars\_only=}, \textit{dm\_only=}, \textit{minimum\_mass=}, \textit{alpha=})} \textemdash\ Draw particles of \textit{p\_size} pixels in a slab of \textit{width} with \textit{col}or using a matplotlib \textit{marker} plotting only every \textit{stride} number of particles.\\
+\texttt{title(\textit{text})}\\
\subsection{The $\sim$/.yt/ Directory}
\settowidth{\MyLen}{\texttt{multicol} }
@@ -297,12 +297,12 @@
\subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}}
+\settowidth{\MyLen}{\texttt{multicol}}
Nearly all of yt is parallelized using
-MPI. The {\it mpi4py} package must be installed for parallelism in yt. To
-install {\it pip install mpi4py} on the command line usually works.
+MPI\@. The \textit{mpi4py} package must be installed for parallelism in yt. To
+install \textit{pip install mpi4py} on the command line usually works.
Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py}\\
+\textit{mpirun -n 12 python script.py}\\
The file \texttt{script.py} must call the \texttt{yt.enable\_parallelism()} to
turn on yt's parallelism. If this doesn't happen, all cores will execute the
same serial yt script. This command may differ for each system on which you use
@@ -320,12 +320,12 @@
\texttt{hg clone https://bitbucket.org/yt\_analysis/yt} \textemdash\ Clone a copy of yt. \\
\texttt{hg status} \textemdash\ Files changed in working directory.\\
\texttt{hg diff} \textemdash\ Print diff of all changed files in working directory. \\
-\texttt{hg diff -r{\it RevX} -r{\it RevY}} \textemdash\ Print diff of all changes between revision {\it RevX} and {\it RevY}.\\
+\texttt{hg diff -r\textit{RevX} -r\textit{RevY}} \textemdash\ Print diff of all changes between revision \textit{RevX} and \textit{RevY}.\\
\texttt{hg log} \textemdash\ History of changes.\\
-\texttt{hg cat -r{\it RevX file}} \textemdash\ Print the contents of {\it file} from revision {\it RevX}.\\
+\texttt{hg cat -r\textit{RevX file}} \textemdash\ Print the contents of \textit{file} from revision \textit{RevX}.\\
\texttt{hg heads} \textemdash\ Print all the current heads. \\
-\texttt{hg revert -r{\it RevX file}} \textemdash\ Revert {\it file} to revision {\it RevX}. On-disk changed version is
-moved to {\it file.orig}. \\
+\texttt{hg revert -r\textit{RevX file}} \textemdash\ Revert \textit{file} to revision \textit{RevX}. On-disk changed version is
+moved to \textit{file.orig}. \\
\texttt{hg commit} \textemdash\ Commit changes to repository. \\
\texttt{hg push} \textemdash\ Push changes to default remote repository. \\
\texttt{hg pull} \textemdash\ Pull changes from default remote repository. \\
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -1,394 +1,4 @@
-#
-# Hi there! Welcome to the yt installation script.
-#
-# This script is designed to create a fully isolated Python installation
-# with the dependencies you need to run yt.
-#
-# This script is based on Conda, a distribution mechanism from Continuum
-# Analytics. The process is as follows:
-#
-# 1. Download the appropriate Conda installation package
-# 2. Install Conda into the specified directory
-# 3. Install yt-specific dependencies
-# 4. Install yt
-#
-# There are a few options listed below, but by default, this will install
-# everything. At the end, it will tell you what to do to use yt.
-#
-# By default this will install yt from source.
-#
-# If you experience problems, please visit the Help section at
-# http://yt-project.org.
-#
-DEST_SUFFIX="yt-conda"
-DEST_DIR="`pwd`/${DEST_SUFFIX/ /}" # Installation location
-BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
-INST_UNSTRUCTURED=1 # Do we want to build with unstructured mesh support?
-
-##################################################################
-# #
-# You will likely not have to modify anything below this region. #
-# #
-##################################################################
-
-LOG_FILE="`pwd`/yt_install.log"
-
-# Here is the idiom for redirecting to the log file:
-# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
-MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="latest"
-YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
- if [ $INST_YT_SOURCE -eq 0 ]
- then
- echo "yt must be compiled from source to use the unstructured mesh support."
- echo "Please set INST_YT_SOURCE to 1 and re-run."
- exit 1
- fi
- if [ `uname` = "Darwin" ]
- then
- EMBREE="embree-2.8.0.x86_64.macosx"
- EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
- else
- EMBREE="embree-2.8.0.x86_64.linux"
- EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
- fi
- PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
-fi
-
-function do_exit
-{
- echo "********************************************"
- echo " FAILURE REPORT:"
- echo "********************************************"
- echo
- tail -n 10 ${LOG_FILE}
- echo
- echo "********************************************"
- echo "********************************************"
- echo "Failure. Check ${LOG_FILE}. The last 10 lines are above."
- exit 1
-}
-
-function log_cmd
-{
- echo "EXECUTING:" >> ${LOG_FILE}
- echo " $*" >> ${LOG_FILE}
- ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-# These are needed to prevent pushd and popd from printing to stdout
-
-function pushd () {
- command pushd "$@" > /dev/null
-}
-
-function popd () {
- command popd "$@" > /dev/null
-}
-
-function get_ytdata
-{
- echo "Downloading $1 from yt-project.org"
- [ -e $1 ] && return
- ${GETFILE} "http://yt-project.org/data/$1" || do_exit
- ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
-}
-
-function get_ytrecipe {
- RDIR=${DEST_DIR}/src/yt-recipes/$1
- mkdir -p ${RDIR}
- pushd ${RDIR}
- log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
- log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
- NEW_PKG=`conda build --output ${RDIR}`
- log_cmd conda build --no-binstar-upload ${RDIR}
- log_cmd conda install ${NEW_PKG}
- popd
-}
-
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "Hi there! This is the yt installation script. We're going to download"
-echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for yt to run within."
-echo
-echo "This will install Miniconda from Continuum Analytics, the necessary"
-echo "packages to run yt, and create a self-contained environment for you to"
-echo "use yt. Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes using the"
-echo "'conda install' command."
-echo
-MYOS=`uname -s` # A guess at the OS
-if [ $INST_YT_SOURCE -ne 0 ]
-then
- if [ "${MYOS##Darwin}" != "${MYOS}" ]
- then
- echo "Looks like you're running on Mac OSX."
- echo
- echo "NOTE: you must have the Xcode command line tools installed."
- echo
- echo "The instructions for obtaining these tools varies according"
- echo "to your exact OS version. On older versions of OS X, you"
- echo "must register for an account on the apple developer tools"
- echo "website: https://developer.apple.com/downloads to obtain the"
- echo "download link."
- echo
- echo "We have gathered some additional instructions for each"
- echo "version of OS X below. If you have trouble installing yt"
- echo "after following these instructions, don't hesitate to contact"
- echo "the yt user's e-mail list."
- echo
- echo "You can see which version of OSX you are running by clicking"
- echo "'About This Mac' in the apple menu on the left hand side of"
- echo "menu bar. We're assuming that you've installed all operating"
- echo "system updates; if you have an older version, we suggest"
- echo "running software update and installing all available updates."
- echo
- echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
- echo "Apple developer tools website."
- echo
- echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
- echo "developer tools website. You can either download the"
- echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
- echo "Software Update to update to XCode 3.2.6 or"
- echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
- echo "bundle (4.1 GB)."
- echo
- echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
- echo "(search for Xcode)."
- echo "Alternatively, download the Xcode command line tools from"
- echo "the Apple developer tools website."
- echo
- echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
- echo "download the appropriate version of Xcode from the"
- echo "mac app store (search for Xcode)."
- echo
- echo "Additionally, you will have to manually install the Xcode"
- echo "command line tools."
- echo
- echo "For OS X 10.8, see:"
- echo "http://stackoverflow.com/questions/9353444"
- echo
- echo "For OS X 10.9 and newer the command line tools can be installed"
- echo "with the following command:"
- echo " xcode-select --install"
- fi
- if [ "${MYOS##Linux}" != "${MYOS}" ]
- then
- echo "Looks like you're on Linux."
- echo
- echo "Please make sure you have the developer tools for your OS "
- echo "installed."
- echo
- if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
- then
- echo "Looks like you're on an OpenSUSE-compatible machine."
- echo
- echo "You need to have these packages installed:"
- echo
- echo " * devel_C_C++"
- echo " * libuuid-devel"
- echo " * gcc-c++"
- echo " * chrpath"
- echo
- echo "You can accomplish this by executing:"
- echo
- echo "$ sudo zypper install -t pattern devel_C_C++"
- echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
- echo "$ sudo zypper install chrpath"
- fi
- if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
- then
- echo "Looks like you're on an Ubuntu-compatible machine."
- echo
- echo "You need to have these packages installed:"
- echo
- echo " * libssl-dev"
- echo " * build-essential"
- echo " * libncurses5"
- echo " * libncurses5-dev"
- echo " * uuid-dev"
- echo " * chrpath"
- echo
- echo "You can accomplish this by executing:"
- echo
- echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
- echo
- fi
- echo
- echo "If you are running on a supercomputer or other module-enabled"
- echo "system, please make sure that the GNU module has been loaded."
- echo
- fi
-fi
-if [ "${MYOS##x86_64}" != "${MYOS}" ]
-then
- MINICONDA_OS="Linux-x86_64"
-elif [ "${MYOS##i386}" != "${MYOS}" ]
-then
- MINICONDA_OS="Linux-x86"
-elif [ "${MYOS##Darwin}" != "${MYOS}" ]
-then
- MINICONDA_OS="MacOSX-x86_64"
-else
- echo "Not sure which Linux distro you are running."
- echo "Going with x86_64 architecture."
- MINICONDA_OS="Linux-x86_64"
-fi
-echo
-echo "If you'd rather not continue, hit Ctrl-C."
-echo
-echo "========================================================================"
-echo
-read -p "[hit enter] "
-echo
-echo "Awesome! Here we go."
-echo
-
-MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
-
-if type -P wget &>/dev/null
-then
- echo "Using wget"
- export GETFILE="wget -nv -nc"
-else
- echo "Using curl"
- export GETFILE="curl -sSO"
-fi
-
-echo
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
-echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
-echo
-
-${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
-
-echo "Installing the Miniconda python environment."
-
-log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
-
-# This we *do* need.
-export PATH=${DEST_DIR}/bin:$PATH
-
-echo "Installing the necessary packages for yt."
-echo "This may take a while, but don't worry. yt loves you."
-
-declare -a YT_DEPS
-YT_DEPS+=('python')
-YT_DEPS+=('setuptools')
-YT_DEPS+=('numpy')
-YT_DEPS+=('jupyter')
-YT_DEPS+=('ipython')
-YT_DEPS+=('sphinx')
-YT_DEPS+=('h5py')
-YT_DEPS+=('matplotlib')
-YT_DEPS+=('cython')
-YT_DEPS+=('nose')
-YT_DEPS+=('conda-build')
-YT_DEPS+=('mercurial')
-YT_DEPS+=('sympy')
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
- YT_DEPS+=('netcdf4')
-fi
-
-# Here is our dependency list for yt
-log_cmd conda update --yes conda
-
-log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-for YT_DEP in "${YT_DEPS[@]}"; do
- echo "Installing $YT_DEP"
- log_cmd conda install --yes ${YT_DEP}
-done
-
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
-
- echo "Installing embree"
- mkdir ${DEST_DIR}/src
- cd ${DEST_DIR}/src
- ( ${GETFILE} "$EMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
- log_cmd tar xfz ${EMBREE}.tar.gz
- log_cmd mv ${DEST_DIR}/src/${EMBREE}/include/embree2 ${DEST_DIR}/include
- log_cmd mv ${DEST_DIR}/src/${EMBREE}/lib/lib*.* ${DEST_DIR}/lib
- if [ `uname` = "Darwin" ]
- then
- ln -s ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.dylib
- install_name_tool -id ${DEST_DIR}/lib/libembree.2.dylib ${DEST_DIR}/lib/libembree.2.dylib
- else
- ln -s ${DEST_DIR}/lib/libembree.so.2 ${DEST_DIR}/lib/libembree.so
- fi
-
- echo "Installing pyembree from source"
- ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
- log_cmd unzip ${DEST_DIR}/src/master.zip
- pushd ${DEST_DIR}/src/pyembree-master
- log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
- popd
-fi
-
-if [ $INST_YT_SOURCE -eq 0 ]
-then
- echo "Installing yt"
- log_cmd conda install --yes yt
-else
- # We do a source install.
- echo "Installing yt from source"
- YT_DIR="${DEST_DIR}/src/yt-hg"
- log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-if [ $INST_UNSTRUCTURED -eq 1 ]
-then
- echo $DEST_DIR > ${YT_DIR}/embree.cfg
-fi
- pushd ${YT_DIR}
- log_cmd python setup.py develop
- popd
-fi
-
-echo
-echo
-echo "========================================================================"
-echo
-echo "yt and the Conda system are now installed in $DEST_DIR ."
-echo
-echo "You must now modify your PATH variable by prepending:"
-echo
-echo " $DEST_DIR/bin"
-echo
-echo "On Bash-style shells you can copy/paste the following command to "
-echo "temporarily activate the yt installation:"
-echo
-echo " export PATH=$DEST_DIR/bin:\$PATH"
-echo
-echo "and on csh-style shells:"
-echo
-echo " setenv PATH $DEST_DIR/bin:\$PATH"
-echo
-echo "You can also update the init file appropriate for your shell to include"
-echo "the same command."
-echo
-echo "To get started with yt, check out the orientation:"
-echo
-echo " http://yt-project.org/doc/orientation/"
-echo
-echo "For support, see the website and join the mailing list:"
-echo
-echo " http://yt-project.org/"
-echo " http://yt-project.org/data/ (Sample data)"
-echo " http://yt-project.org/doc/ (Docs)"
-echo
-echo " http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
-echo
-echo "========================================================================"
-echo
-echo "Oh, look at me, still talking when there's science to do!"
-echo "Good luck, and email the user list if you run into any problems."
+echo "This script has been deprecated."
+echo "You can now create a conda-based build using install_script.sh"
+echo "Please download that script and run it"
+exit 0
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/helper_scripts/code_support.py
--- a/doc/helper_scripts/code_support.py
+++ b/doc/helper_scripts/code_support.py
@@ -85,7 +85,7 @@
print("|| . ||", end=' ')
for c in code_names:
print("%s || " % (c), end=' ')
-print()
+print()
for vn in vals:
print("|| !%s ||" % (vn), end=' ')
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -19,7 +19,7 @@
CWD = os.getcwd()
ytcfg["yt", "serialize"] = "False"
PARALLEL_TEST = {"rockstar_nest": "3"}
-BLACKLIST = []
+BLACKLIST = ["opengl_ipython", "opengl_vr"]
def prep_dirs():
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/helper_scripts/table.py
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -44,7 +44,7 @@
"A bunch of illustrated examples of how to do things"),
("reference/index.html", "Reference Materials",
"A list of all bundled fields, API documentation, the Change Log..."),
- ("faq/index.html", "FAQ",
+ ("faq/index.html", "FAQ",
"Frequently Asked Questions: answered for you!")
]),
]
diff -r 0f8594985133be6e157bde32b5980ca2a8cba13f -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -66,7 +66,7 @@
written = cond_output(output, written)
ofn = "%s/%s_%s" % (ndir, fn, os.path.basename(ifn))
open(ofn, "wb").write(open(ifn, "rb").read())
- output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
+ output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
" :width: 240\n" +
" :target: ../_images/%s_%s\n" % (fn, os.path.basename(ifn))
)
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/a7b7b8765fc3/
Changeset: a7b7b8765fc3
Branch: yt
User: qobilidop
Date: 2016-06-08 22:35:29+00:00
Summary: Change 'ptype' to 'index_ptype' to avoid naming conflict.
Affected #: 5 files
diff -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -75,7 +75,7 @@
additional_fields=(),
unit_base=None, n_ref=64,
over_refine_factor=1,
- ptype="all",
+ index_ptype="all",
bounding_box = None,
header_spec = "default",
field_spec = "default",
@@ -91,7 +91,7 @@
ptype_spec, gadget_ptype_specs)
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
- self.ptype = ptype
+ self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -345,7 +345,7 @@
def __init__(self, filename, dataset_type="gadget_hdf5",
unit_base = None, n_ref=64,
over_refine_factor=1,
- ptype="all",
+ index_ptype="all",
bounding_box = None,
units_override=None,
unit_system="cgs"):
@@ -356,8 +356,7 @@
"Use unit_base instead.")
super(GadgetHDF5Dataset, self).__init__(
filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
- over_refine_factor=over_refine_factor,
- ptype=ptype,
+ over_refine_factor=over_refine_factor, index_ptype=index_ptype,
bounding_box = bounding_box, unit_system=unit_system)
def _get_hvals(self):
diff -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -123,14 +123,14 @@
f.close()
def _initialize_index(self, data_file, regions):
- ptype = self.ptype
+ index_ptype = self.index_ptype
f = _get_h5_handle(data_file.filename)
- if ptype == "all":
+ if index_ptype == "all":
pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
keys = f.keys()
else:
pcount = f["/Header"].attrs["NumPart_ThisFile"][int(ptype[-1])]
- keys = [ptype]
+ keys = [index_ptype]
morton = np.empty(pcount, dtype='uint64')
ind = 0
for key in keys:
diff -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1021,13 +1021,13 @@
filename_template = "stream_file"
n_ref = 64
over_refine_factor = 1
- ptype = "all"
+ index_ptype = "all"
def load_particles(data, length_unit = None, bbox=None,
sim_time=0.0, mass_unit = None, time_unit = None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
- n_ref = 64, over_refine_factor = 1, ptype = "all",
+ n_ref = 64, over_refine_factor = 1, index_ptype ="all",
geometry = "cartesian", unit_system="cgs"):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -1150,7 +1150,7 @@
sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system)
sds.n_ref = n_ref
sds.over_refine_factor = over_refine_factor
- sds.ptype = ptype
+ sds.index_ptype = index_ptype
return sds
diff -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -167,7 +167,7 @@
def _initialize_index(self, data_file, regions):
# self.fields[g.id][fname] is the pattern here
- index_ptype = self.ptype
+ index_ptype = self.index_ptype
if index_ptype == "all":
ptypes = self.ds.particle_types_raw
else:
diff -r 22fe8c96f2e7b0a741277a86221a4d25d4b55f60 -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -39,9 +39,9 @@
super(ParticleIndex, self).__init__(ds, dataset_type)
@property
- def ptype(self):
- if hasattr(self.dataset, "ptype"):
- return self.dataset.ptype
+ def index_ptype(self):
+ if hasattr(self.dataset, "index_ptype"):
+ return self.dataset.index_ptype
else:
return "all"
@@ -76,13 +76,13 @@
cls = self.dataset._file_class
self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
for i in range(ndoms)]
- ptype = self.ptype
- if ptype == "all":
+ index_ptype = self.index_ptype
+ if index_ptype == "all":
self.total_particles = sum(
sum(d.total_particles.values()) for d in self.data_files)
else:
self.total_particles = sum(
- d.total_particles[ptype] for d in self.data_files)
+ d.total_particles[index_ptype] for d in self.data_files)
ds = self.dataset
self.oct_handler = ParticleOctreeContainer(
[1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
@@ -90,7 +90,7 @@
self.oct_handler.n_ref = ds.n_ref
only_on_root(mylog.info, "Allocating for %0.3e particles "
"(particle type '%s')",
- self.total_particles, ptype)
+ self.total_particles, index_ptype)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
self.regions = ParticleRegions(
@@ -114,17 +114,17 @@
# * Broadcast back a serialized octree to join
#
# For now we will do this in serial.
- ptype = self.ptype
- # Set the ptype attribute of self.io dynamically here, so we don't
+ index_ptype = self.index_ptype
+ # Set the index_ptype attribute of self.io dynamically here, so we don't
# need to assume that the dataset has the attribute.
- self.io.ptype = ptype
+ self.io.index_ptype = index_ptype
morton = np.empty(self.total_particles, dtype="uint64")
ind = 0
for data_file in self.data_files:
- if ptype == "all":
+ if index_ptype == "all":
npart = sum(data_file.total_particles.values())
else:
- npart = data_file.total_particles[ptype]
+ npart = data_file.total_particles[index_ptype]
morton[ind:ind + npart] = \
self.io._initialize_index(data_file, self.regions)
ind += npart
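With the rename in place, the user-facing spelling for a Gadget HDF5 snapshot becomes index_ptype. A minimal usage sketch (the snapshot path matches the sample data used by the test added below):

import yt

# Build the spatial index over gas particles (PartType0) only; other
# particle types remain readable, they just do not shape the octree.
ds = yt.load("snapshot_033/snap_033.0.hdf5", index_ptype="PartType0")
dd = ds.all_data()
print(dd["cell_volume"].sum(dtype="float64"))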
https://bitbucket.org/yt_analysis/yt/commits/3a36efe38b15/
Changeset: 3a36efe38b15
Branch: yt
User: qobilidop
Date: 2016-06-21 21:06:46+00:00
Summary: Undo changes to StreamParticlesDataset
Affected #: 2 files
diff -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a -r 3a36efe38b1531922dc80bbc710c432d9cf818cc yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -400,13 +400,13 @@
Assign particle data to the grids using MatchPointsToGrids. This
will overwrite any existing particle data, so be careful!
"""
-
+
# Note: what we need to do here is a bit tricky. Because occasionally this
# gets called before we property handle the field detection, we cannot use
# any information about the index. Fortunately for us, we can generate
# most of the GridTree utilizing information we already have from the
# stream handler.
-
+
if len(ds.stream_handler.fields) > 1:
if ("io", "particle_position_x") in pdata:
@@ -425,7 +425,7 @@
np.equal(parent_ids, i, mask)
num_children[i] = mask.sum()
levels = ds.stream_handler.levels.astype("int64").ravel()
- grid_tree = GridTree(num_grids,
+ grid_tree = GridTree(num_grids,
ds.stream_handler.left_edges,
ds.stream_handler.right_edges,
ds.stream_handler.dimensions,
@@ -443,8 +443,8 @@
out=particle_indices[1:])
else :
particle_indices[1] = particle_grid_count.squeeze()
-
- pdata.pop("number_of_particles", None)
+
+ pdata.pop("number_of_particles", None)
grid_pdata = []
for i, pcount in enumerate(particle_grid_count):
grid = {}
@@ -457,12 +457,12 @@
else :
grid_pdata = [pdata]
-
+
for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
ds.stream_handler.fields[gi].update(pd)
npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
ds.stream_handler.particle_count[gi] = npart
-
+
def unitify_data(data):
new_data, field_units = {}, {}
for field, val in data.items():
@@ -507,7 +507,7 @@
# At this point, we have arrays for all our fields
new_data = {}
for field in data:
- if isinstance(field, tuple):
+ if isinstance(field, tuple):
new_field = field
elif len(data[field].shape) in (1, 2):
new_field = ("io", field)
@@ -580,7 +580,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
Examples
@@ -782,7 +782,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
refine_by : integer
Specifies the refinement ratio between levels. Defaults to 2.
@@ -998,7 +998,7 @@
class StreamParticleIndex(ParticleIndex):
-
+
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
super(StreamParticleIndex, self).__init__(ds, dataset_type)
@@ -1021,14 +1021,13 @@
filename_template = "stream_file"
n_ref = 64
over_refine_factor = 1
- index_ptype = "all"
def load_particles(data, length_unit = None, bbox=None,
sim_time=0.0, mass_unit = None, time_unit = None,
velocity_unit=None, magnetic_unit=None,
periodicity=(True, True, True),
- n_ref = 64, over_refine_factor = 1, index_ptype ="all",
- geometry = "cartesian", unit_system="cgs"):
+ n_ref = 64, over_refine_factor = 1, geometry = "cartesian",
+ unit_system="cgs"):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -1042,7 +1041,7 @@
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
-
+
Parameters
----------
data : dict
@@ -1092,7 +1091,7 @@
field_units, data = unitify_data(data)
sfh = StreamDictFieldHandler()
-
+
pdata = {}
for key in data.keys() :
if not isinstance(key, tuple):
@@ -1150,7 +1149,6 @@
sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system)
sds.n_ref = n_ref
sds.over_refine_factor = over_refine_factor
- sds.index_ptype = index_ptype
return sds
@@ -1194,7 +1192,7 @@
array([[-1. , -1. , -1. ],
[-1. , -1. , -0.25],
[-1. , -1. , 0. ],
- ...,
+ ...,
[ 1. , 1. , 0. ],
[ 1. , 1. , 0.25],
[ 1. , 1. , 1. ]])
@@ -1272,7 +1270,7 @@
Particle fields are detected as one-dimensional fields. The number of particles
is set by the "number_of_particles" key in data.
-
+
Parameters
----------
data : dict
@@ -1305,7 +1303,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
"""
@@ -1320,9 +1318,9 @@
field_units, data = unitify_data(data)
sfh = StreamDictFieldHandler()
-
+
particle_types = set_particle_types(data)
-
+
sfh.update({'connectivity': connectivity,
'coordinates': coordinates,
0: data})
@@ -1406,7 +1404,7 @@
dest.update((field, np.empty(cell_count, dtype="float64"))
for field in content)
# Make references ...
- count = oct_handler.fill_level(0, levels, cell_inds, file_inds,
+ count = oct_handler.fill_level(0, levels, cell_inds, file_inds,
dest, content, offset)
return count
@@ -1495,7 +1493,7 @@
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
-
+
Parameters
----------
octree_mask : np.ndarray[uint8_t]
diff -r a7b7b8765fc38dd189649a78ff957ef8f9ceb03a -r 3a36efe38b1531922dc80bbc710c432d9cf818cc yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -167,13 +167,8 @@
def _initialize_index(self, data_file, regions):
# self.fields[g.id][fname] is the pattern here
- index_ptype = self.index_ptype
- if index_ptype == "all":
- ptypes = self.ds.particle_types_raw
- else:
- ptypes = [index_ptype]
morton = []
- for ptype in ptypes:
+ for ptype in self.ds.particle_types_raw:
try:
pos = np.column_stack(self.fields[data_file.filename][
(ptype, "particle_position_%s" % ax)] for ax in 'xyz')
https://bitbucket.org/yt_analysis/yt/commits/22529ac8df9f/
Changeset: 22529ac8df9f
Branch: yt
User: qobilidop
Date: 2016-06-21 22:54:11+00:00
Summary: Fix a typo and shorten that line.
Affected #: 1 file
diff -r 3a36efe38b1531922dc80bbc710c432d9cf818cc -r 22529ac8df9f0eae2cebb12a6f6a4b2b372e3ad2 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -129,7 +129,8 @@
pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
keys = f.keys()
else:
- pcount = f["/Header"].attrs["NumPart_ThisFile"][int(ptype[-1])]
+ pt = int(index_ptype[-1])
+ pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
keys = [index_ptype]
morton = np.empty(pcount, dtype='uint64')
ind = 0
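The one-line fix above repairs a NameError left over from the rename: this scope defines index_ptype, not ptype. A minimal sketch of the string-to-header-slot mapping, assuming Gadget-style group names whose last character is the single-digit particle type:

    import numpy as np

    # Fake NumPart_ThisFile header row; real values come from the HDF5 file.
    num_part_this_file = np.array([4096, 4096, 0, 0, 512, 0])

    index_ptype = "PartType0"
    pt = int(index_ptype[-1])        # "PartType0" -> slot 0
    pcount = num_part_this_file[pt]  # particle count for that type
    print(pt, pcount)                # -> 0 4096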
https://bitbucket.org/yt_analysis/yt/commits/5e6df67765c8/
Changeset: 5e6df67765c8
Branch: yt
User: qobilidop
Date: 2016-06-21 22:54:44+00:00
Summary: Change log message.
Affected #: 1 file
diff -r 22529ac8df9f0eae2cebb12a6f6a4b2b372e3ad2 -r 5e6df67765c838271032170c66ac961b498391f7 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -89,7 +89,7 @@
over_refine = ds.over_refine_factor)
self.oct_handler.n_ref = ds.n_ref
only_on_root(mylog.info, "Allocating for %0.3e particles "
- "(particle type '%s')",
+ "(index particle type '%s')",
self.total_particles, index_ptype)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
https://bitbucket.org/yt_analysis/yt/commits/42da1fd6f5ed/
Changeset: 42da1fd6f5ed
Branch: yt
User: qobilidop
Date: 2016-06-21 22:57:01+00:00
Summary: Add a test for index ptype (the OWLS case).
Affected #: 1 file
diff -r 5e6df67765c838271032170c66ac961b498391f7 -r 42da1fd6f5ed3704ac438d468c4feec72068f05f yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -144,6 +144,21 @@
cv2 = dd2["cell_volume"].sum(dtype="float64")
yield assert_equal, cv1, cv2
+index_ptype_snap = "snapshot_033/snap_033.0.hdf5"
+@requires_file(index_ptype_snap)
+def test_particle_index_ptype():
+ ds = yt.load(index_ptype_snap)
+ ds_all = yt.load(index_ptype_snap, index_ptype="all")
+ ds_pt0 = yt.load(index_ptype_snap, index_ptype="PartType0")
+ dd = ds.all_data()
+ dd_all = ds_all.all_data()
+ dd_pt0 = ds_pt0.all_data()
+ cv = dd["cell_volume"]
+ cv_all = dd_all["cell_volume"]
+ cv_pt0 = dd_pt0["cell_volume"]
+ yield assert_equal, cv.shape, cv_all.shape
+ yield assert_equal, cv.sum(dtype="float64"), cv_pt0.sum(dtype="float64")
+
class FakeDS:
domain_left_edge = None
domain_right_edge = None
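A hedged sketch of the invariant this test encodes: the octree always tiles the same domain, so the summed cell volume should not depend on which particle type built the index, even though the cell count may differ. It assumes the snapshot_033 test dataset is available locally:

    import yt
    from numpy.testing import assert_equal

    fn = "snapshot_033/snap_033.0.hdf5"
    ds_all = yt.load(fn, index_ptype="all")
    ds_pt0 = yt.load(fn, index_ptype="PartType0")

    cv_all = ds_all.all_data()["cell_volume"]
    cv_pt0 = ds_pt0.all_data()["cell_volume"]

    # Refinement (and hence cell count) may differ, but totals agree.
    assert_equal(cv_all.sum(dtype="float64"), cv_pt0.sum(dtype="float64"))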
https://bitbucket.org/yt_analysis/yt/commits/3f4b38d79660/
Changeset: 3f4b38d79660
Branch: yt
User: qobilidop
Date: 2016-06-29 21:59:51+00:00
Summary: Add documentation for this feature.
Affected #: 1 file
diff -r 42da1fd6f5ed3704ac438d468c4feec72068f05f -r 3f4b38d79660030e4bcce29d119a4d9431d68ecd doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -884,6 +884,21 @@
If you want higher resolution, it's recommended that you try reducing the
value of ``n_ref`` to 32 or 16.
+yt can also be configured to generate the global mesh index from a specific
+particle type, rather than from all particles, via the parameter
+``index_ptype``. For example, to build the octree from the
+``"PartType0"`` particles only, you can do:
+
+.. code-block:: python
+
+ ds = yt.load("snapshot_061.hdf5", index_ptype='PartType0')
+
+By default, ``index_ptype`` is set to ``"all"``, meaning that all particle
+types are used. Currently this feature only works for Gadget HDF5 and OWLS
+datasets. To bring the feature to other frontends, refer to this
+`PR <https://bitbucket.org/yt_analysis/yt/pull-requests/1985/add-particle-type-aware-octree/diff>`_
+for implementation details.
+
.. _gadget-field-spec:
Field Specifications
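As a hedged usage note beyond the documented snippet (the snapshot name is the documentation's placeholder, not a shipped file): the chosen type is stored on the dataset and echoed in the allocation log line once the index is built:

    import yt

    ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0")
    ds.index               # building the index logs the index particle type
    print(ds.index_ptype)  # -> "PartType0"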
https://bitbucket.org/yt_analysis/yt/commits/f340fedc4a2e/
Changeset: f340fedc4a2e
Branch: yt
User: qobilidop
Date: 2016-06-29 22:07:27+00:00
Summary: Fix quotation mark consistency.
Affected #: 1 file
diff -r 3f4b38d79660030e4bcce29d119a4d9431d68ecd -r f340fedc4a2eac562374e282e922fcf216d97a71 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -891,7 +891,7 @@
.. code-block:: python
- ds = yt.load("snapshot_061.hdf5", index_ptype='PartType0')
+ ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0")
By default, ``index_ptype`` is set to ``"all"``, meaning that all particle
types are used. Currently this feature only works for Gadget HDF5 and OWLS
https://bitbucket.org/yt_analysis/yt/commits/98f754270a40/
Changeset: 98f754270a40
Branch: yt
User: ngoldbaum
Date: 2016-07-09 21:49:09+00:00
Summary: Merged in qobilidop/yt (pull request #1985)
Add particle-type-aware octree construction
Affected #: 7 files
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -884,6 +884,21 @@
If you want higher resolution, it's recommended that you try reducing the
value of ``n_ref`` to 32 or 16.
+yt can also be configured to generate the global mesh index from a specific
+particle type, rather than from all particles, via the parameter
+``index_ptype``. For example, to build the octree from the
+``"PartType0"`` particles only, you can do:
+
+.. code-block:: python
+
+ ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0")
+
+By default, ``index_ptype`` is set to ``"all"``, meaning that all particle
+types are used. Currently this feature only works for Gadget HDF5 and OWLS
+datasets. To bring the feature to other frontends, refer to this
+`PR <https://bitbucket.org/yt_analysis/yt/pull-requests/1985/add-particle-type-aware-octree/diff>`_
+for implementation details.
+
.. _gadget-field-spec:
Field Specifications
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -75,6 +75,7 @@
additional_fields=(),
unit_base=None, n_ref=64,
over_refine_factor=1,
+ index_ptype="all",
bounding_box = None,
header_spec = "default",
field_spec = "default",
@@ -90,6 +91,7 @@
ptype_spec, gadget_ptype_specs)
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
+ self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -343,6 +345,7 @@
def __init__(self, filename, dataset_type="gadget_hdf5",
unit_base = None, n_ref=64,
over_refine_factor=1,
+ index_ptype="all",
bounding_box = None,
units_override=None,
unit_system="cgs"):
@@ -353,7 +356,7 @@
"Use unit_base instead.")
super(GadgetHDF5Dataset, self).__init__(
filename, dataset_type, unit_base=unit_base, n_ref=n_ref,
- over_refine_factor=over_refine_factor,
+ over_refine_factor=over_refine_factor, index_ptype=index_ptype,
bounding_box = bounding_box, unit_system=unit_system)
def _get_hvals(self):
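A minimal, generic sketch of the keyword-forwarding pattern this hunk applies, with stand-in class names rather than the actual yt classes: the HDF5 subclass accepts index_ptype and hands it unchanged to the base class, which stores it for the geometry handler:

    class BaseDataset(object):
        def __init__(self, filename, index_ptype="all"):
            self.index_ptype = index_ptype  # later read by the index

    class HDF5Dataset(BaseDataset):
        def __init__(self, filename, index_ptype="all"):
            # Forward the new keyword, as the super().__init__ call
            # in the diff does.
            super(HDF5Dataset, self).__init__(filename,
                                              index_ptype=index_ptype)

    ds = HDF5Dataset("snap.hdf5", index_ptype="PartType0")
    print(ds.index_ptype)  # -> "PartType0"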
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -123,11 +123,18 @@
f.close()
def _initialize_index(self, data_file, regions):
+ index_ptype = self.index_ptype
f = _get_h5_handle(data_file.filename)
- pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+ if index_ptype == "all":
+ pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+ keys = f.keys()
+ else:
+ pt = int(index_ptype[-1])
+ pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
+ keys = [index_ptype]
morton = np.empty(pcount, dtype='uint64')
ind = 0
- for key in f.keys():
+ for key in keys:
if not key.startswith("PartType"): continue
if "Coordinates" not in f[key]: continue
ds = f[key]["Coordinates"]
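A self-contained sketch of the branch above, with plain dicts standing in for the open HDF5 handle: "all" keeps every PartType group and the summed header count, while a named type narrows both:

    import numpy as np

    # Stand-ins for the HDF5 file's header and group keys.
    header = {"NumPart_ThisFile": np.array([100, 200, 0, 0, 0, 0])}
    groups = ["PartType0", "PartType1"]  # what f.keys() would yield

    def select(index_ptype):
        if index_ptype == "all":
            pcount = int(header["NumPart_ThisFile"].sum())
            keys = groups
        else:
            pt = int(index_ptype[-1])
            pcount = int(header["NumPart_ThisFile"][pt])
            keys = [index_ptype]
        return pcount, keys

    print(select("all"))        # -> (300, ['PartType0', 'PartType1'])
    print(select("PartType1"))  # -> (200, ['PartType1'])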
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -299,7 +299,7 @@
name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
from yt.data_objects.static_output import _cached_datasets
_cached_datasets[name] = self
- Dataset.__init__(self, name, self._dataset_type,
+ Dataset.__init__(self, name, self._dataset_type,
unit_system=unit_system)
def _parse_parameter_file(self):
@@ -400,13 +400,13 @@
Assign particle data to the grids using MatchPointsToGrids. This
will overwrite any existing particle data, so be careful!
"""
-
+
# Note: what we need to do here is a bit tricky. Because occasionally this
# gets called before we property handle the field detection, we cannot use
# any information about the index. Fortunately for us, we can generate
# most of the GridTree utilizing information we already have from the
# stream handler.
-
+
if len(ds.stream_handler.fields) > 1:
if ("io", "particle_position_x") in pdata:
@@ -425,7 +425,7 @@
np.equal(parent_ids, i, mask)
num_children[i] = mask.sum()
levels = ds.stream_handler.levels.astype("int64").ravel()
- grid_tree = GridTree(num_grids,
+ grid_tree = GridTree(num_grids,
ds.stream_handler.left_edges,
ds.stream_handler.right_edges,
ds.stream_handler.dimensions,
@@ -443,8 +443,8 @@
out=particle_indices[1:])
else :
particle_indices[1] = particle_grid_count.squeeze()
-
- pdata.pop("number_of_particles", None)
+
+ pdata.pop("number_of_particles", None)
grid_pdata = []
for i, pcount in enumerate(particle_grid_count):
grid = {}
@@ -457,12 +457,12 @@
else :
grid_pdata = [pdata]
-
+
for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
ds.stream_handler.fields[gi].update(pd)
npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
ds.stream_handler.particle_count[gi] = npart
-
+
def unitify_data(data):
new_data, field_units = {}, {}
for field, val in data.items():
@@ -507,7 +507,7 @@
# At this point, we have arrays for all our fields
new_data = {}
for field in data:
- if isinstance(field, tuple):
+ if isinstance(field, tuple):
new_field = field
elif len(data[field].shape) in (1, 2):
new_field = ("io", field)
@@ -580,7 +580,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
Examples
@@ -782,7 +782,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
refine_by : integer
Specifies the refinement ratio between levels. Defaults to 2.
@@ -998,7 +998,7 @@
class StreamParticleIndex(ParticleIndex):
-
+
def __init__(self, ds, dataset_type = None):
self.stream_handler = ds.stream_handler
super(StreamParticleIndex, self).__init__(ds, dataset_type)
@@ -1041,7 +1041,7 @@
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
-
+
Parameters
----------
data : dict
@@ -1091,7 +1091,7 @@
field_units, data = unitify_data(data)
sfh = StreamDictFieldHandler()
-
+
pdata = {}
for key in data.keys() :
if not isinstance(key, tuple):
@@ -1192,7 +1192,7 @@
array([[-1. , -1. , -1. ],
[-1. , -1. , -0.25],
[-1. , -1. , 0. ],
- ...,
+ ...,
[ 1. , 1. , 0. ],
[ 1. , 1. , 0.25],
[ 1. , 1. , 1. ]])
@@ -1270,7 +1270,7 @@
Particle fields are detected as one-dimensional fields. The number of particles
is set by the "number_of_particles" key in data.
-
+
Parameters
----------
data : dict
@@ -1303,7 +1303,7 @@
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
- can be done for other coordinates, for instance:
+ can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
"""
@@ -1318,9 +1318,9 @@
field_units, data = unitify_data(data)
sfh = StreamDictFieldHandler()
-
+
particle_types = set_particle_types(data)
-
+
sfh.update({'connectivity': connectivity,
'coordinates': coordinates,
0: data})
@@ -1404,7 +1404,7 @@
dest.update((field, np.empty(cell_count, dtype="float64"))
for field in content)
# Make references ...
- count = oct_handler.fill_level(0, levels, cell_inds, file_inds,
+ count = oct_handler.fill_level(0, levels, cell_inds, file_inds,
dest, content, offset)
return count
@@ -1493,7 +1493,7 @@
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
-
+
Parameters
----------
octree_mask : np.ndarray[uint8_t]
@@ -1826,7 +1826,7 @@
sds._node_fields = node_data[0].keys()
sds._elem_fields = elem_data[0].keys()
- sds.default_field = [f for f in sds.field_list
+ sds.default_field = [f for f in sds.field_list
if f[0] == 'connect1'][-1]
return sds
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -38,6 +38,13 @@
self.float_type = np.float64
super(ParticleIndex, self).__init__(ds, dataset_type)
+ @property
+ def index_ptype(self):
+ if hasattr(self.dataset, "index_ptype"):
+ return self.dataset.index_ptype
+ else:
+ return "all"
+
def _setup_geometry(self):
mylog.debug("Initializing Particle Geometry Handler.")
self._initialize_particle_handler()
@@ -69,14 +76,21 @@
cls = self.dataset._file_class
self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
for i in range(ndoms)]
- self.total_particles = sum(
- sum(d.total_particles.values()) for d in self.data_files)
+ index_ptype = self.index_ptype
+ if index_ptype == "all":
+ self.total_particles = sum(
+ sum(d.total_particles.values()) for d in self.data_files)
+ else:
+ self.total_particles = sum(
+ d.total_particles[index_ptype] for d in self.data_files)
ds = self.dataset
self.oct_handler = ParticleOctreeContainer(
[1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
over_refine = ds.over_refine_factor)
self.oct_handler.n_ref = ds.n_ref
- only_on_root(mylog.info, "Allocating for %0.3e particles", self.total_particles)
+ only_on_root(mylog.info, "Allocating for %0.3e particles "
+ "(index particle type '%s')",
+ self.total_particles, index_ptype)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
self.regions = ParticleRegions(
@@ -100,10 +114,17 @@
# * Broadcast back a serialized octree to join
#
# For now we will do this in serial.
+ index_ptype = self.index_ptype
+ # Set the index_ptype attribute of self.io dynamically here, so we don't
+ # need to assume that the dataset has the attribute.
+ self.io.index_ptype = index_ptype
morton = np.empty(self.total_particles, dtype="uint64")
ind = 0
for data_file in self.data_files:
- npart = sum(data_file.total_particles.values())
+ if index_ptype == "all":
+ npart = sum(data_file.total_particles.values())
+ else:
+ npart = data_file.total_particles[index_ptype]
morton[ind:ind + npart] = \
self.io._initialize_index(data_file, self.regions)
ind += npart
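A minimal sketch of the two pieces this hunk adds -- the hasattr fallback that keeps frontends without the attribute on the old all-particles path, and the per-file count selection that sizes the morton array -- using illustrative stand-in classes:

    class DataFile(object):
        def __init__(self, total_particles):
            self.total_particles = total_particles  # dict: ptype -> count

    class Index(object):
        def __init__(self, dataset, data_files):
            self.dataset = dataset
            self.data_files = data_files

        @property
        def index_ptype(self):
            # Mirrors the new property: a dataset without the attribute
            # falls back to "all", preserving the old behavior.
            if hasattr(self.dataset, "index_ptype"):
                return self.dataset.index_ptype
            return "all"

        def total(self):
            # Mirrors the total_particles branch in the hunk above.
            if self.index_ptype == "all":
                return sum(sum(d.total_particles.values())
                           for d in self.data_files)
            return sum(d.total_particles[self.index_ptype]
                       for d in self.data_files)

    class DS(object):
        index_ptype = "PartType0"

    files = [DataFile({"PartType0": 10, "PartType1": 20})]
    print(Index(DS(), files).total())      # -> 10
    print(Index(object(), files).total())  # -> 30 (falls back to "all")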
diff -r 142714a92cbb18866fbdb7b94cf064c10be848ad -r 98f754270a409f2798b0abe8cd8db13021ac9743 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -144,6 +144,21 @@
cv2 = dd2["cell_volume"].sum(dtype="float64")
yield assert_equal, cv1, cv2
+index_ptype_snap = "snapshot_033/snap_033.0.hdf5"
+@requires_file(index_ptype_snap)
+def test_particle_index_ptype():
+ ds = yt.load(index_ptype_snap)
+ ds_all = yt.load(index_ptype_snap, index_ptype="all")
+ ds_pt0 = yt.load(index_ptype_snap, index_ptype="PartType0")
+ dd = ds.all_data()
+ dd_all = ds_all.all_data()
+ dd_pt0 = ds_pt0.all_data()
+ cv = dd["cell_volume"]
+ cv_all = dd_all["cell_volume"]
+ cv_pt0 = dd_pt0["cell_volume"]
+ yield assert_equal, cv.shape, cv_all.shape
+ yield assert_equal, cv.sum(dtype="float64"), cv_pt0.sum(dtype="float64")
+
class FakeDS:
domain_left_edge = None
domain_right_edge = None
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.