path: root/contrib/tools/cython/Cython/Compiler
author    alexv-smirnov <alex@ydb.tech>  2023-06-13 11:05:01 +0300
committer alexv-smirnov <alex@ydb.tech>  2023-06-13 11:05:01 +0300
commit    bf0f13dd39ee3e65092ba3572bb5b1fcd125dcd0 (patch)
tree      1d1df72c0541a59a81439842f46d95396d3e7189 /contrib/tools/cython/Cython/Compiler
parent    8bfdfa9a9bd19bddbc58d888e180fbd1218681be (diff)
download  ydb-bf0f13dd39ee3e65092ba3572bb5b1fcd125dcd0.tar.gz
add ymake export to ydb
Diffstat (limited to 'contrib/tools/cython/Cython/Compiler')
-rw-r--r--  contrib/tools/cython/Cython/Compiler/AnalysedTreeTransforms.py  99
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Annotate.py  320
-rw-r--r--  contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py  214
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Buffer.py  740
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Builtin.py  444
-rw-r--r--  contrib/tools/cython/Cython/Compiler/CmdLine.py  244
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Code.pxd  124
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Code.py  2597
-rw-r--r--  contrib/tools/cython/Cython/Compiler/CodeGeneration.py  35
-rw-r--r--  contrib/tools/cython/Cython/Compiler/CythonScope.py  164
-rw-r--r--  contrib/tools/cython/Cython/Compiler/DebugFlags.py  21
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Errors.py  265
-rw-r--r--  contrib/tools/cython/Cython/Compiler/ExprNodes.py  13719
-rw-r--r--  contrib/tools/cython/Cython/Compiler/FlowControl.pxd  111
-rw-r--r--  contrib/tools/cython/Cython/Compiler/FlowControl.py  1325
-rw-r--r--  contrib/tools/cython/Cython/Compiler/FusedNode.py  901
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Future.py  15
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Interpreter.py  64
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Lexicon.py  138
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Main.py  920
-rw-r--r--  contrib/tools/cython/Cython/Compiler/MemoryView.py  858
-rw-r--r--  contrib/tools/cython/Cython/Compiler/ModuleNode.py  3223
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Naming.py  162
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Nodes.py  9456
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Optimize.py  4857
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Options.py  555
-rw-r--r--  contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd  82
-rw-r--r--  contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py  3535
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Parsing.pxd  199
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Parsing.py  3860
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Pipeline.py  369
-rw-r--r--  contrib/tools/cython/Cython/Compiler/PyrexTypes.py  4745
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Pythran.py  227
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Scanning.pxd  67
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Scanning.py  553
-rw-r--r--  contrib/tools/cython/Cython/Compiler/StringEncoding.py  363
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Symtab.py  2552
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestBuffer.py  105
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py  170
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py  68
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py  129
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py  71
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestParseTreeTransforms.py  289
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py  73
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestStringEncoding.py  44
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py  64
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py  94
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py  19
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py  101
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/TestVisitor.py  61
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Tests/__init__.py  1
-rw-r--r--  contrib/tools/cython/Cython/Compiler/TreeFragment.py  275
-rw-r--r--  contrib/tools/cython/Cython/Compiler/TreePath.py  296
-rw-r--r--  contrib/tools/cython/Cython/Compiler/TypeInference.py  591
-rw-r--r--  contrib/tools/cython/Cython/Compiler/TypeSlots.py  941
-rw-r--r--  contrib/tools/cython/Cython/Compiler/UtilNodes.py  359
-rw-r--r--  contrib/tools/cython/Cython/Compiler/UtilityCode.py  237
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Version.py  9
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Visitor.pxd  55
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Visitor.py  840
-rw-r--r--  contrib/tools/cython/Cython/Compiler/__init__.py  1
61 files changed, 63016 insertions, 0 deletions
diff --git a/contrib/tools/cython/Cython/Compiler/AnalysedTreeTransforms.py b/contrib/tools/cython/Cython/Compiler/AnalysedTreeTransforms.py
new file mode 100644
index 0000000000..07bf31f3e0
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/AnalysedTreeTransforms.py
@@ -0,0 +1,99 @@
+from __future__ import absolute_import
+
+from .Visitor import ScopeTrackingTransform
+from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
+from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
+from .PyrexTypes import py_object_type
+from .StringEncoding import EncodedString
+from . import Symtab
+
+class AutoTestDictTransform(ScopeTrackingTransform):
+ # Handles autotestdict directive
+
+ blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
+ '__nonzero__', '__bool__',
+ '__len__', '__contains__']
+
+ def visit_ModuleNode(self, node):
+ if node.is_pxd:
+ return node
+ self.scope_type = 'module'
+ self.scope_node = node
+
+ if not self.current_directives['autotestdict']:
+ return node
+ self.all_docstrings = self.current_directives['autotestdict.all']
+ self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']
+
+ assert isinstance(node.body, StatListNode)
+
+ # First see if __test__ is already created
+ if u'__test__' in node.scope.entries:
+ # Do nothing
+ return node
+
+ pos = node.pos
+
+ self.tests = []
+ self.testspos = node.pos
+
+ test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
+ py_object_type,
+ pos,
+ visibility='public')
+ create_test_dict_assignment = SingleAssignmentNode(pos,
+ lhs=NameNode(pos, name=EncodedString(u'__test__'),
+ entry=test_dict_entry),
+ rhs=DictNode(pos, key_value_pairs=self.tests))
+ self.visitchildren(node)
+ node.body.stats.append(create_test_dict_assignment)
+ return node
+
+ def add_test(self, testpos, path, doctest):
+ pos = self.testspos
+ keystr = u'%s (line %d)' % (path, testpos[1])
+ key = UnicodeNode(pos, value=EncodedString(keystr))
+ value = UnicodeNode(pos, value=doctest)
+ self.tests.append(DictItemNode(pos, key=key, value=value))
+
+ def visit_ExprNode(self, node):
+ # expressions cannot contain functions and lambda expressions
+ # do not have a docstring
+ return node
+
+ def visit_FuncDefNode(self, node):
+ if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
+ return node
+ if not self.cdef_docstrings:
+ if isinstance(node, CFuncDefNode) and not node.py_func:
+ return node
+ if not self.all_docstrings and '>>>' not in node.doc:
+ return node
+
+ pos = self.testspos
+ if self.scope_type == 'module':
+ path = node.entry.name
+ elif self.scope_type in ('pyclass', 'cclass'):
+ if isinstance(node, CFuncDefNode):
+ if node.py_func is not None:
+ name = node.py_func.name
+ else:
+ name = node.entry.name
+ else:
+ name = node.name
+ if self.scope_type == 'cclass' and name in self.blacklist:
+ return node
+ if self.scope_type == 'pyclass':
+ class_name = self.scope_node.name
+ else:
+ class_name = self.scope_node.class_name
+ if isinstance(node.entry.scope, Symtab.PropertyScope):
+ property_method_name = node.entry.scope.name
+ path = "%s.%s.%s" % (class_name, node.entry.scope.name,
+ node.entry.name)
+ else:
+ path = "%s.%s" % (class_name, node.entry.name)
+ else:
+ assert False
+ self.add_test(node.pos, path, node.doc)
+ return node
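For orientation, a minimal sketch of what AutoTestDictTransform produces (hypothetical example.pyx, not part of this diff; the key format follows add_test above):

    # cython: autotestdict=True
    # hypothetical example.pyx
    def add(a, b):
        """
        >>> add(1, 2)
        3
        """
        return a + b

    # The transform appends an assignment equivalent to
    #     __test__ = {u'add (line 3)': add.__doc__}
    # so the standard doctest runner picks up the embedded test:
    #     import doctest, example
    #     doctest.testmod(example)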
diff --git a/contrib/tools/cython/Cython/Compiler/Annotate.py b/contrib/tools/cython/Cython/Compiler/Annotate.py
new file mode 100644
index 0000000000..2ea38c00c7
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Annotate.py
@@ -0,0 +1,320 @@
+# Note: Work in progress
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import re
+import codecs
+import textwrap
+from datetime import datetime
+from functools import partial
+from collections import defaultdict
+try:
+ from xml.sax.saxutils import escape as html_escape
+except ImportError:
+ pass
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO # does not support writing 'str' in Py2
+
+from . import Version
+from .Code import CCodeWriter
+from .. import Utils
+
+
+class AnnotationCCodeWriter(CCodeWriter):
+
+ def __init__(self, create_from=None, buffer=None, copy_formatting=True):
+ CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting)
+ if create_from is None:
+ self.annotation_buffer = StringIO()
+ self.last_annotated_pos = None
+ # annotations[filename][line] -> [(column, AnnotationItem)*]
+ self.annotations = defaultdict(partial(defaultdict, list))
+ # code[filename][line] -> str
+ self.code = defaultdict(partial(defaultdict, str))
+ # scopes[filename][line] -> set(scopes)
+ self.scopes = defaultdict(partial(defaultdict, set))
+ else:
+ # When creating an insertion point, keep references to the same database
+ self.annotation_buffer = create_from.annotation_buffer
+ self.annotations = create_from.annotations
+ self.code = create_from.code
+ self.scopes = create_from.scopes
+ self.last_annotated_pos = create_from.last_annotated_pos
+
+ def create_new(self, create_from, buffer, copy_formatting):
+ return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
+
+ def write(self, s):
+ CCodeWriter.write(self, s)
+ self.annotation_buffer.write(s)
+
+ def mark_pos(self, pos, trace=True):
+ if pos is not None:
+ CCodeWriter.mark_pos(self, pos, trace)
+ if self.funcstate and self.funcstate.scope:
+ # lambdas and genexprs can result in multiple scopes per line => keep them in a set
+ self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope)
+ if self.last_annotated_pos:
+ source_desc, line, _ = self.last_annotated_pos
+ pos_code = self.code[source_desc.filename]
+ pos_code[line] += self.annotation_buffer.getvalue()
+ self.annotation_buffer = StringIO()
+ self.last_annotated_pos = pos
+
+ def annotate(self, pos, item):
+ self.annotations[pos[0].filename][pos[1]].append((pos[2], item))
+
+ def _css(self):
+        """CSS template; will later allow choosing a colormap."""
+ css = [self._css_template]
+ for i in range(255):
+ color = u"FFFF%02x" % int(255/(1+i/10.0))
+ css.append('.cython.score-%d {background-color: #%s;}' % (i, color))
+ try:
+ from pygments.formatters import HtmlFormatter
+ except ImportError:
+ pass
+ else:
+ css.append(HtmlFormatter().get_style_defs('.cython'))
+ return '\n'.join(css)
+
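+    # For illustration: the first generated rule is
+    # '.cython.score-0 {background-color: #FFFFff;}'; higher scores map to
+    # increasingly saturated yellow backgrounds.
+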
+ _css_template = textwrap.dedent("""
+ body.cython { font-family: courier; font-size: 12; }
+
+ .cython.tag { }
+ .cython.line { margin: 0em }
+ .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; }
+
+ .cython.line .run { background-color: #B0FFB0; }
+ .cython.line .mis { background-color: #FFB0B0; }
+ .cython.code.run { border-left: 8px solid #B0FFB0; }
+ .cython.code.mis { border-left: 8px solid #FFB0B0; }
+
+ .cython.code .py_c_api { color: red; }
+ .cython.code .py_macro_api { color: #FF7000; }
+ .cython.code .pyx_c_api { color: #FF3000; }
+ .cython.code .pyx_macro_api { color: #FF7000; }
+ .cython.code .refnanny { color: #FFA000; }
+ .cython.code .trace { color: #FFA000; }
+ .cython.code .error_goto { color: #FFA000; }
+
+ .cython.code .coerce { color: #008000; border: 1px dotted #008000 }
+ .cython.code .py_attr { color: #FF0000; font-weight: bold; }
+ .cython.code .c_attr { color: #0000FF; }
+ .cython.code .py_call { color: #FF0000; font-weight: bold; }
+ .cython.code .c_call { color: #0000FF; }
+ """)
+
+ # on-click toggle function to show/hide C source code
+ _onclick_attr = ' onclick="{0}"'.format((
+ "(function(s){"
+ " s.display = s.display === 'block' ? 'none' : 'block'"
+ "})(this.nextElementSibling.style)"
+ ).replace(' ', '') # poor dev's JS minification
+ )
+
+ def save_annotation(self, source_filename, target_filename, coverage_xml=None):
+ with Utils.open_source_file(source_filename) as f:
+ code = f.read()
+ generated_code = self.code.get(source_filename, {})
+ c_file = Utils.decode_filename(os.path.basename(target_filename))
+ html_filename = os.path.splitext(target_filename)[0] + ".html"
+
+ with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer:
+ out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml))
+
+ def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None):
+ coverage_info = ''
+ if coverage_timestamp:
+ coverage_info = u' with coverage data from {timestamp}'.format(
+ timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000))
+
+ outlist = [
+ textwrap.dedent(u'''\
+ <!DOCTYPE html>
+ <!-- Generated by Cython {watermark} -->
+ <html>
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>Cython: {filename}</title>
+ <style type="text/css">
+ {css}
+ </style>
+ </head>
+ <body class="cython">
+ <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
+ <p>
+ <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
+ Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
+ </p>
+ ''').format(css=self._css(), watermark=Version.watermark,
+ filename=os.path.basename(source_filename) if source_filename else '',
+ more_info=coverage_info)
+ ]
+ if c_file:
+ outlist.append(u'<p>Raw output: <a href="%s">%s</a></p>\n' % (c_file, c_file))
+ return outlist
+
+ def _save_annotation_footer(self):
+ return (u'</body></html>\n',)
+
+ def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None):
+ """
+        code : original cython source code
+        generated_code : generated c code keyed by line number in original file
+        c_file : filename in which the c_code has been written
+        source_filename : name of the original cython source file
+        coverage_xml : parsed XML tree with coverage data, if available
+ """
+ if coverage_xml is not None and source_filename:
+ coverage_timestamp = coverage_xml.get('timestamp', '').strip()
+ covered_lines = self._get_line_coverage(coverage_xml, source_filename)
+ else:
+ coverage_timestamp = covered_lines = None
+ annotation_items = dict(self.annotations[source_filename])
+ scopes = dict(self.scopes[source_filename])
+
+ outlist = []
+ outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp))
+ outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines))
+ outlist.extend(self._save_annotation_footer())
+ return ''.join(outlist)
+
+ def _get_line_coverage(self, coverage_xml, source_filename):
+ coverage_data = None
+ for entry in coverage_xml.iterfind('.//class'):
+ if not entry.get('filename'):
+ continue
+ if (entry.get('filename') == source_filename or
+ os.path.abspath(entry.get('filename')) == source_filename):
+ coverage_data = entry
+ break
+ elif source_filename.endswith(entry.get('filename')):
+ coverage_data = entry # but we might still find a better match...
+ if coverage_data is None:
+ return None
+ return dict(
+ (int(line.get('number')), int(line.get('hits')))
+ for line in coverage_data.iterfind('lines/line')
+ )
+
+ def _htmlify_code(self, code):
+ try:
+ from pygments import highlight
+ from pygments.lexers import CythonLexer
+ from pygments.formatters import HtmlFormatter
+ except ImportError:
+ # no Pygments, just escape the code
+ return html_escape(code)
+
+ html_code = highlight(
+ code, CythonLexer(stripnl=False, stripall=False),
+ HtmlFormatter(nowrap=True))
+ return html_code
+
+ def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None):
+ outlist = [u'<div class="cython">']
+ pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
+        # note: the trailing bound ".copy" (not called here) hands out a
+        # fresh zero-initialised counter dict for every source line below
+        new_calls_map = dict(
+            (name, 0) for name in
+            'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
+        ).copy
+
+ self.mark_pos(None)
+
+ def annotate(match):
+ group_name = match.lastgroup
+ calls[group_name] += 1
+ return u"<span class='%s'>%s</span>" % (
+ group_name, match.group(group_name))
+
+ lines = self._htmlify_code(cython_code).splitlines()
+ lineno_width = len(str(len(lines)))
+ if not covered_lines:
+ covered_lines = None
+
+ for k, line in enumerate(lines, 1):
+ try:
+ c_code = generated_code[k]
+ except KeyError:
+ c_code = ''
+ else:
+ c_code = _replace_pos_comment(pos_comment_marker, c_code)
+ if c_code.startswith(pos_comment_marker):
+ c_code = c_code[len(pos_comment_marker):]
+ c_code = html_escape(c_code)
+
+ calls = new_calls_map()
+ c_code = _parse_code(annotate, c_code)
+ score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
+ calls['py_macro_api'] + calls['pyx_macro_api'])
+
+ if c_code:
+ onclick = self._onclick_attr
+ expandsymbol = '+'
+ else:
+ onclick = ''
+ expandsymbol = '&#xA0;'
+
+ covered = ''
+ if covered_lines is not None and k in covered_lines:
+ hits = covered_lines[k]
+ if hits is not None:
+ covered = 'run' if hits else 'mis'
+
+ outlist.append(
+ u'<pre class="cython line score-{score}"{onclick}>'
+ # generate line number with expand symbol in front,
+ # and the right number of digit
+ u'{expandsymbol}<span class="{covered}">{line:0{lineno_width}d}</span>: {code}</pre>\n'.format(
+ score=score,
+ expandsymbol=expandsymbol,
+ covered=covered,
+ lineno_width=lineno_width,
+ line=k,
+ code=line.rstrip(),
+ onclick=onclick,
+ ))
+ if c_code:
+ outlist.append(u"<pre class='cython code score-{score} {covered}'>{code}</pre>".format(
+ score=score, covered=covered, code=c_code))
+ outlist.append(u"</div>")
+ return outlist
+
+
+_parse_code = re.compile((
+ br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|'
+ br'(?P<trace>__Pyx_Trace[A-Za-z]+)|'
+ br'(?:'
+ br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|'
+ br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|'
+ br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|'
+ br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
+ br')(?=\()|' # look-ahead to exclude subsequent '(' from replacement
+ br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
+).decode('ascii')).sub
+
+
+_replace_pos_comment = re.compile(
+ # this matches what Cython generates as code line marker comment
+ br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'),
+ re.M
+).sub
+
+
+class AnnotationItem(object):
+
+ def __init__(self, style, text, tag="", size=0):
+ self.style = style
+ self.text = text
+ self.tag = tag
+ self.size = size
+
+ def start(self):
+ return u"<span class='cython tag %s' title='%s'>%s" % (self.style, self.text, self.tag)
+
+ def end(self):
+ return self.size, u"</span>"
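For context, a sketch of how this writer is typically driven (assuming a mod.pyx in the working directory): the annotate option routes code generation through AnnotationCCodeWriter and writes mod.html next to the generated mod.c.

    # build_annotated.py -- minimal sketch
    from Cython.Build import cythonize

    # Equivalent to running "cython -a mod.pyx"; each source line in the
    # HTML gets a score computed from the py_c_api/pyx_c_api/... counters
    # matched by the _parse_code regex above.
    cythonize("mod.pyx", annotate=True)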
diff --git a/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py
new file mode 100644
index 0000000000..d3c0a1d0da
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py
@@ -0,0 +1,214 @@
+from __future__ import absolute_import, print_function
+
+from .Visitor import CythonTransform
+from .StringEncoding import EncodedString
+from . import Options
+from . import PyrexTypes, ExprNodes
+from ..CodeWriter import ExpressionWriter
+
+
+class AnnotationWriter(ExpressionWriter):
+
+ def visit_Node(self, node):
+ self.put(u"<???>")
+
+ def visit_LambdaNode(self, node):
+ # XXX Should we do better?
+ self.put("<lambda>")
+
+
+class EmbedSignature(CythonTransform):
+
+ def __init__(self, context):
+ super(EmbedSignature, self).__init__(context)
+ self.class_name = None
+ self.class_node = None
+
+ def _fmt_expr(self, node):
+ writer = AnnotationWriter()
+ result = writer.write(node)
+ # print(type(node).__name__, '-->', result)
+ return result
+
+ def _fmt_arg(self, arg):
+ if arg.type is PyrexTypes.py_object_type or arg.is_self_arg:
+ doc = arg.name
+ else:
+ doc = arg.type.declaration_code(arg.name, for_display=1)
+
+ if arg.annotation:
+ annotation = self._fmt_expr(arg.annotation)
+ doc = doc + (': %s' % annotation)
+ if arg.default:
+ default = self._fmt_expr(arg.default)
+ doc = doc + (' = %s' % default)
+ elif arg.default:
+ default = self._fmt_expr(arg.default)
+ doc = doc + ('=%s' % default)
+ return doc
+
+ def _fmt_star_arg(self, arg):
+ arg_doc = arg.name
+ if arg.annotation:
+ annotation = self._fmt_expr(arg.annotation)
+ arg_doc = arg_doc + (': %s' % annotation)
+ return arg_doc
+
+ def _fmt_arglist(self, args,
+ npargs=0, pargs=None,
+ nkargs=0, kargs=None,
+ hide_self=False):
+ arglist = []
+ for arg in args:
+ if not hide_self or not arg.entry.is_self_arg:
+ arg_doc = self._fmt_arg(arg)
+ arglist.append(arg_doc)
+ if pargs:
+ arg_doc = self._fmt_star_arg(pargs)
+ arglist.insert(npargs, '*%s' % arg_doc)
+ elif nkargs:
+ arglist.insert(npargs, '*')
+ if kargs:
+ arg_doc = self._fmt_star_arg(kargs)
+ arglist.append('**%s' % arg_doc)
+ return arglist
+
+ def _fmt_ret_type(self, ret):
+ if ret is PyrexTypes.py_object_type:
+ return None
+ else:
+ return ret.declaration_code("", for_display=1)
+
+ def _fmt_signature(self, cls_name, func_name, args,
+ npargs=0, pargs=None,
+ nkargs=0, kargs=None,
+ return_expr=None,
+ return_type=None, hide_self=False):
+ arglist = self._fmt_arglist(args,
+ npargs, pargs,
+ nkargs, kargs,
+ hide_self=hide_self)
+ arglist_doc = ', '.join(arglist)
+ func_doc = '%s(%s)' % (func_name, arglist_doc)
+ if cls_name:
+ func_doc = '%s.%s' % (cls_name, func_doc)
+ ret_doc = None
+ if return_expr:
+ ret_doc = self._fmt_expr(return_expr)
+ elif return_type:
+ ret_doc = self._fmt_ret_type(return_type)
+ if ret_doc:
+ func_doc = '%s -> %s' % (func_doc, ret_doc)
+ return func_doc
+
+ def _embed_signature(self, signature, node_doc):
+ if node_doc:
+ return "%s\n%s" % (signature, node_doc)
+ else:
+ return signature
+
+ def __call__(self, node):
+ if not Options.docstrings:
+ return node
+ else:
+ return super(EmbedSignature, self).__call__(node)
+
+ def visit_ClassDefNode(self, node):
+ oldname = self.class_name
+ oldclass = self.class_node
+ self.class_node = node
+ try:
+ # PyClassDefNode
+ self.class_name = node.name
+ except AttributeError:
+ # CClassDefNode
+ self.class_name = node.class_name
+ self.visitchildren(node)
+ self.class_name = oldname
+ self.class_node = oldclass
+ return node
+
+ def visit_LambdaNode(self, node):
+        # lambda expressions do not have a signature or inner functions
+ return node
+
+ def visit_DefNode(self, node):
+ if not self.current_directives['embedsignature']:
+ return node
+
+ is_constructor = False
+ hide_self = False
+ if node.entry.is_special:
+ is_constructor = self.class_node and node.name == '__init__'
+ if not is_constructor:
+ return node
+ class_name, func_name = None, self.class_name
+ hide_self = True
+ else:
+ class_name, func_name = self.class_name, node.name
+
+ nkargs = getattr(node, 'num_kwonly_args', 0)
+ npargs = len(node.args) - nkargs
+ signature = self._fmt_signature(
+ class_name, func_name, node.args,
+ npargs, node.star_arg,
+ nkargs, node.starstar_arg,
+ return_expr=node.return_type_annotation,
+ return_type=None, hide_self=hide_self)
+ if signature:
+ if is_constructor:
+ doc_holder = self.class_node.entry.type.scope
+ else:
+ doc_holder = node.entry
+
+ if doc_holder.doc is not None:
+ old_doc = doc_holder.doc
+ elif not is_constructor and getattr(node, 'py_func', None) is not None:
+ old_doc = node.py_func.entry.doc
+ else:
+ old_doc = None
+ new_doc = self._embed_signature(signature, old_doc)
+ doc_holder.doc = EncodedString(new_doc)
+ if not is_constructor and getattr(node, 'py_func', None) is not None:
+ node.py_func.entry.doc = EncodedString(new_doc)
+ return node
+
+ def visit_CFuncDefNode(self, node):
+ if not self.current_directives['embedsignature']:
+ return node
+ if not node.overridable: # not cpdef FOO(...):
+ return node
+
+ signature = self._fmt_signature(
+ self.class_name, node.declarator.base.name,
+ node.declarator.args,
+ return_type=node.return_type)
+ if signature:
+ if node.entry.doc is not None:
+ old_doc = node.entry.doc
+ elif getattr(node, 'py_func', None) is not None:
+ old_doc = node.py_func.entry.doc
+ else:
+ old_doc = None
+ new_doc = self._embed_signature(signature, old_doc)
+ node.entry.doc = EncodedString(new_doc)
+ if hasattr(node, 'py_func') and node.py_func is not None:
+ node.py_func.entry.doc = EncodedString(new_doc)
+ return node
+
+ def visit_PropertyNode(self, node):
+ if not self.current_directives['embedsignature']:
+ return node
+
+ entry = node.entry
+ if entry.visibility == 'public':
+ # property synthesised from a cdef public attribute
+ type_name = entry.type.declaration_code("", for_display=1)
+ if not entry.type.is_pyobject:
+ type_name = "'%s'" % type_name
+ elif entry.type.is_extension_type:
+ type_name = entry.type.module_name + '.' + type_name
+ signature = '%s: %s' % (entry.name, type_name)
+ new_doc = self._embed_signature(signature, entry.doc)
+ entry.doc = EncodedString(new_doc)
+ return node
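A sketch of this transform's effect (hypothetical module, assuming the embedsignature directive is enabled):

    # cython: embedsignature=True
    cpdef int scale(int x, double factor=1.0):
        """Scale x by factor."""
        return <int>(x * factor)

    # After compilation, help(scale) shows the docstring prefixed with the
    # generated signature, roughly:
    #     scale(int x, double factor=1.0) -> int
    #     Scale x by factor.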
diff --git a/contrib/tools/cython/Cython/Compiler/Buffer.py b/contrib/tools/cython/Cython/Compiler/Buffer.py
new file mode 100644
index 0000000000..c62a24f568
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Buffer.py
@@ -0,0 +1,740 @@
+from __future__ import absolute_import
+
+from .Visitor import CythonTransform
+from .ModuleNode import ModuleNode
+from .Errors import CompileError
+from .UtilityCode import CythonUtilityCode
+from .Code import UtilityCode, TempitaUtilityCode
+
+from . import Options
+from . import Interpreter
+from . import PyrexTypes
+from . import Naming
+from . import Symtab
+
+def dedent(text, reindent=0):
+ from textwrap import dedent
+ text = dedent(text)
+ if reindent > 0:
+ indent = " " * reindent
+ text = '\n'.join([indent + x for x in text.split('\n')])
+ return text
+
+class IntroduceBufferAuxiliaryVars(CythonTransform):
+
+ #
+ # Entry point
+ #
+
+ buffers_exists = False
+ using_memoryview = False
+
+ def __call__(self, node):
+ assert isinstance(node, ModuleNode)
+ self.max_ndim = 0
+ result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
+ if self.buffers_exists:
+ use_bufstruct_declare_code(node.scope)
+ use_py2_buffer_functions(node.scope)
+
+ return result
+
+
+ #
+ # Basic operations for transforms
+ #
+ def handle_scope(self, node, scope):
+ # For all buffers, insert extra variables in the scope.
+ # The variables are also accessible from the buffer_info
+ # on the buffer entry
+ scope_items = scope.entries.items()
+ bufvars = [entry for name, entry in scope_items if entry.type.is_buffer]
+ if len(bufvars) > 0:
+ bufvars.sort(key=lambda entry: entry.name)
+ self.buffers_exists = True
+
+ memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice]
+ if len(memviewslicevars) > 0:
+ self.buffers_exists = True
+
+
+ for (name, entry) in scope_items:
+ if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode):
+ self.using_memoryview = True
+ break
+ del scope_items
+
+ if isinstance(node, ModuleNode) and len(bufvars) > 0:
+ # for now...note that pos is wrong
+ raise CompileError(node.pos, "Buffer vars not allowed in module scope")
+ for entry in bufvars:
+ if entry.type.dtype.is_ptr:
+ raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
+
+ name = entry.name
+ buftype = entry.type
+ if buftype.ndim > Options.buffer_max_dims:
+ raise CompileError(node.pos,
+ "Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims)
+ if buftype.ndim > self.max_ndim:
+ self.max_ndim = buftype.ndim
+
+ # Declare auxiliary vars
+ def decvar(type, prefix):
+ cname = scope.mangle(prefix, name)
+ aux_var = scope.declare_var(name=None, cname=cname,
+ type=type, pos=node.pos)
+ if entry.is_arg:
+ aux_var.used = True # otherwise, NameNode will mark whether it is used
+
+ return aux_var
+
+ auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix),
+ (PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix))
+ pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars]
+
+ entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer)
+
+ scope.buffer_entries = bufvars
+ self.scope = scope
+
+ def visit_ModuleNode(self, node):
+ self.handle_scope(node, node.scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ self.handle_scope(node, node.local_scope)
+ self.visitchildren(node)
+ return node
+
+#
+# Analysis
+#
+buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast") # ordered!
+buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False}
+buffer_positional_options_count = 1 # anything beyond this needs keyword argument
+
+ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
+ERR_BUF_TOO_MANY = 'Too many buffer options'
+ERR_BUF_DUP = '"%s" buffer option already supplied'
+ERR_BUF_MISSING = '"%s" missing'
+ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)'
+ERR_BUF_NDIM = 'ndim must be a non-negative integer'
+ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct'
+ERR_BUF_BOOL = '"%s" must be a boolean'
+
+def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
+ """
+ Must be called during type analysis, as analyse is called
+ on the dtype argument.
+
+ posargs and dictargs should consist of a list and a dict
+ of tuples (value, pos). Defaults should be a dict of values.
+
+ Returns a dict containing all the options a buffer can have and
+ its value (with the positions stripped).
+ """
+ if defaults is None:
+ defaults = buffer_defaults
+
+ posargs, dictargs = Interpreter.interpret_compiletime_options(
+ posargs, dictargs, type_env=env, type_args=(0, 'dtype'))
+
+ if len(posargs) > buffer_positional_options_count:
+ raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
+
+ options = {}
+ for name, (value, pos) in dictargs.items():
+        if name not in buffer_options:
+ raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
+ options[name] = value
+
+ for name, (value, pos) in zip(buffer_options, posargs):
+ if not name in buffer_options:
+        if name not in buffer_options:
+ if name in options:
+ raise CompileError(pos, ERR_BUF_DUP % name)
+ options[name] = value
+
+ # Check that they are all there and copy defaults
+ for name in buffer_options:
+        if name not in options:
+ try:
+ options[name] = defaults[name]
+ except KeyError:
+ if need_complete:
+ raise CompileError(globalpos, ERR_BUF_MISSING % name)
+
+ dtype = options.get("dtype")
+ if dtype and dtype.is_extension_type:
+ raise CompileError(globalpos, ERR_BUF_DTYPE)
+
+ ndim = options.get("ndim")
+ if ndim and (not isinstance(ndim, int) or ndim < 0):
+ raise CompileError(globalpos, ERR_BUF_NDIM)
+
+ mode = options.get("mode")
+    if mode and mode not in ('full', 'strided', 'c', 'fortran'):
+ raise CompileError(globalpos, ERR_BUF_MODE)
+
+ def assert_bool(name):
+ x = options.get(name)
+ if not isinstance(x, bool):
+ raise CompileError(globalpos, ERR_BUF_BOOL % name)
+
+ assert_bool('negative_indices')
+ assert_bool('cast')
+
+ return options
+
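+# Illustrative example (not part of the module): for a declaration like
+#     cdef object[int, ndim=2, mode='c'] buf
+# posargs carries the dtype and dictargs the keyword options, and the dict
+# returned above (with defaults filled in) is roughly
+#     {'dtype': <int type>, 'ndim': 2, 'mode': 'c',
+#      'negative_indices': True, 'cast': False}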
+
+#
+# Code generation
+#
+
+class BufferEntry(object):
+ def __init__(self, entry):
+ self.entry = entry
+ self.type = entry.type
+ self.cname = entry.buffer_aux.buflocal_nd_var.cname
+ self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname
+ self.buf_ptr_type = entry.type.buffer_ptr_type
+ self.init_attributes()
+
+ def init_attributes(self):
+ self.shape = self.get_buf_shapevars()
+ self.strides = self.get_buf_stridevars()
+ self.suboffsets = self.get_buf_suboffsetvars()
+
+ def get_buf_suboffsetvars(self):
+ return self._for_all_ndim("%s.diminfo[%d].suboffsets")
+
+ def get_buf_stridevars(self):
+ return self._for_all_ndim("%s.diminfo[%d].strides")
+
+ def get_buf_shapevars(self):
+ return self._for_all_ndim("%s.diminfo[%d].shape")
+
+ def _for_all_ndim(self, s):
+ return [s % (self.cname, i) for i in range(self.type.ndim)]
+
+ def generate_buffer_lookup_code(self, code, index_cnames):
+ # Create buffer lookup and return it
+ # This is done via utility macros/inline functions, which vary
+ # according to the access mode used.
+ params = []
+ nd = self.type.ndim
+ mode = self.type.mode
+ if mode == 'full':
+ for i, s, o in zip(index_cnames,
+ self.get_buf_stridevars(),
+ self.get_buf_suboffsetvars()):
+ params.append(i)
+ params.append(s)
+ params.append(o)
+ funcname = "__Pyx_BufPtrFull%dd" % nd
+ funcgen = buf_lookup_full_code
+ else:
+ if mode == 'strided':
+ funcname = "__Pyx_BufPtrStrided%dd" % nd
+ funcgen = buf_lookup_strided_code
+ elif mode == 'c':
+ funcname = "__Pyx_BufPtrCContig%dd" % nd
+ funcgen = buf_lookup_c_code
+ elif mode == 'fortran':
+ funcname = "__Pyx_BufPtrFortranContig%dd" % nd
+ funcgen = buf_lookup_fortran_code
+ else:
+ assert False
+ for i, s in zip(index_cnames, self.get_buf_stridevars()):
+ params.append(i)
+ params.append(s)
+
+ # Make sure the utility code is available
+ if funcname not in code.globalstate.utility_codes:
+ code.globalstate.utility_codes.add(funcname)
+ protocode = code.globalstate['utility_code_proto']
+ defcode = code.globalstate['utility_code_def']
+ funcgen(protocode, defcode, name=funcname, nd=nd)
+
+ buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code()
+ ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr,
+ ", ".join(params))
+ return ptrcode
+
+
+def get_flags(buffer_aux, buffer_type):
+ flags = 'PyBUF_FORMAT'
+ mode = buffer_type.mode
+ if mode == 'full':
+ flags += '| PyBUF_INDIRECT'
+ elif mode == 'strided':
+ flags += '| PyBUF_STRIDES'
+ elif mode == 'c':
+ flags += '| PyBUF_C_CONTIGUOUS'
+ elif mode == 'fortran':
+ flags += '| PyBUF_F_CONTIGUOUS'
+ else:
+ assert False
+ if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
+ return flags
+
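+# For illustration, a writable mode='strided' buffer yields the flag
+# expression 'PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE'.
+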
+def used_buffer_aux_vars(entry):
+ buffer_aux = entry.buffer_aux
+ buffer_aux.buflocal_nd_var.used = True
+ buffer_aux.rcbuf_var.used = True
+
+def put_unpack_buffer_aux_into_scope(buf_entry, code):
+ # Generate code to copy the needed struct info into local
+ # variables.
+ buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+
+ fldnames = ['strides', 'shape']
+ if mode == 'full':
+ fldnames.append('suboffsets')
+
+ ln = []
+ for i in range(buf_entry.type.ndim):
+ for fldname in fldnames:
+ ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % \
+ (pybuffernd_struct, i, fldname,
+ pybuffernd_struct, fldname, i))
+ code.putln(' '.join(ln))
+
+def put_init_vars(entry, code):
+ bufaux = entry.buffer_aux
+ pybuffernd_struct = bufaux.buflocal_nd_var.cname
+ pybuffer_struct = bufaux.rcbuf_var.cname
+ # init pybuffer_struct
+ code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct)
+ code.putln("%s.refcount = 0;" % pybuffer_struct)
+ # init the buffer object
+ # code.put_init_var_to_py_none(entry)
+ # init the pybuffernd_struct
+ code.putln("%s.data = NULL;" % pybuffernd_struct)
+ code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
+
+
+def put_acquire_arg_buffer(entry, code, pos):
+ buffer_aux = entry.buffer_aux
+ getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
+
+ # Acquire any new buffer
+ code.putln("{")
+ code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
+ code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
+ code.putln("}")
+ # An exception raised in arg parsing cannot be caught, so no
+ # need to care about the buffer then.
+ put_unpack_buffer_aux_into_scope(entry, code)
+
+
+def put_release_buffer_code(code, entry):
+ code.globalstate.use_utility_code(acquire_utility_code)
+ code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
+
+
+def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
+ ndim = buffer_type.ndim
+ cast = int(buffer_type.cast)
+ flags = get_flags(buffer_aux, buffer_type)
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+
+ dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
+
+ code.globalstate.use_utility_code(acquire_utility_code)
+ return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
+ "(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
+ "%(cast)d, __pyx_stack)" % locals())
+
+
+def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
+ is_initialized, pos, code):
+ """
+    Generate code for reassigning a buffer variable. This only deals with getting
+    the buffer auxiliary structure and variables set up correctly; the assignment
+    itself and the refcounting are the responsibility of the caller.
+
+ However, the assignment operation may throw an exception so that the reassignment
+ never happens.
+
+ Depending on the circumstances there are two possible outcomes:
+ - Old buffer released, new acquired, rhs assigned to lhs
+    - Old buffer released, new acquired which fails, reacquire old lhs buffer
+ (which may or may not succeed).
+ """
+
+ buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+ flags = get_flags(buffer_aux, buffer_type)
+
+ code.putln("{") # Set up necessary stack for getbuffer
+ code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
+
+ getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
+
+ if is_initialized:
+ # Release any existing buffer
+ code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
+ # Acquire
+ retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
+ code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
+ # If acquisition failed, attempt to reacquire the old buffer
+ # before raising the exception. A failure of reacquisition
+ # will cause the reacquisition exception to be reported, one
+ # can consider working around this later.
+ exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
+ for _ in range(3))
+ code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
+ code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
+ code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
+ code.globalstate.use_utility_code(raise_buffer_fallback_code)
+ code.putln('__Pyx_RaiseBufferFallbackError();')
+ code.putln('} else {')
+ code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
+ code.putln('}')
+ code.putln('%s = %s = %s = 0;' % exc_temps)
+ for t in exc_temps:
+ code.funcstate.release_temp(t)
+ code.putln('}')
+ # Unpack indices
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
+ code.putln(code.error_goto_if_neg(retcode_cname, pos))
+ code.funcstate.release_temp(retcode_cname)
+ else:
+ # Our entry had no previous value, so set to None when acquisition fails.
+ # In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
+ # so it suffices to set the buf field to NULL.
+ code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
+ code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
+ (lhs_cname,
+ PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
+ pybuffernd_struct))
+ code.putln(code.error_goto(pos))
+ code.put('} else {')
+ # Unpack indices
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
+ code.putln('}')
+
+ code.putln("}") # Release stack
+
+
+def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
+ pos, code, negative_indices, in_nogil_context):
+ """
+ Generates code to process indices and calculate an offset into
+ a buffer. Returns a C string which gives a pointer which can be
+ read from or written to at will (it is an expression so caller should
+ store it in a temporary if it is used more than once).
+
+    As the bounds checking can involve any combination of signed and unsigned
+    arguments and various optimizations, we insert it directly in the function
+    body. The lookup itself, however, is delegated to an inline function that is
+    instantiated once per ndim (lookups with suboffsets tend to get quite
+    complicated).
+
+ entry is a BufferEntry
+ """
+ negative_indices = directives['wraparound'] and negative_indices
+
+ if directives['boundscheck']:
+ # Check bounds and fix negative indices.
+ # We allocate a temporary which is initialized to -1, meaning OK (!).
+ # If an error occurs, the temp is set to the index dimension the
+ # error is occurring at.
+ failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = -1;" % failed_dim_temp)
+ for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
+ if signed != 0:
+ # not unsigned, deal with negative index
+ code.putln("if (%s < 0) {" % cname)
+ if negative_indices:
+ code.putln("%s += %s;" % (cname, shape))
+ code.putln("if (%s) %s = %d;" % (
+ code.unlikely("%s < 0" % cname),
+ failed_dim_temp, dim))
+ else:
+ code.putln("%s = %d;" % (failed_dim_temp, dim))
+ code.put("} else ")
+ # check bounds in positive direction
+ if signed != 0:
+ cast = ""
+ else:
+ cast = "(size_t)"
+ code.putln("if (%s) %s = %d;" % (
+ code.unlikely("%s >= %s%s" % (cname, cast, shape)),
+ failed_dim_temp, dim))
+
+ if in_nogil_context:
+ code.globalstate.use_utility_code(raise_indexerror_nogil)
+ func = '__Pyx_RaiseBufferIndexErrorNogil'
+ else:
+ code.globalstate.use_utility_code(raise_indexerror_code)
+ func = '__Pyx_RaiseBufferIndexError'
+
+ code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
+ code.putln('%s(%s);' % (func, failed_dim_temp))
+ code.putln(code.error_goto(pos))
+ code.putln('}')
+ code.funcstate.release_temp(failed_dim_temp)
+ elif negative_indices:
+ # Only fix negative indices.
+ for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
+ if signed != 0:
+ code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
+
+ return entry.generate_buffer_lookup_code(code, index_cnames)
+
+
+def use_bufstruct_declare_code(env):
+ env.use_utility_code(buffer_struct_declare_code)
+
+
+def buf_lookup_full_code(proto, defin, name, nd):
+ """
+ Generates a buffer lookup function for the right number
+ of dimensions. The function gives back a void* at the right location.
+ """
+ # _i_ndex, _s_tride, sub_o_ffset
+ macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
+ proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))
+
+ funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
+ proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
+ defin.putln(dedent("""
+ static CYTHON_INLINE void* %s_imp(void* buf, %s) {
+ char* ptr = (char*)buf;
+ """) % (name, funcargs) + "".join([dedent("""\
+ ptr += s%d * i%d;
+ if (o%d >= 0) ptr = *((char**)ptr) + o%d;
+ """) % (i, i, i, i) for i in range(nd)]
+ ) + "\nreturn ptr;\n}")
+
+
+def buf_lookup_strided_code(proto, defin, name, nd):
+ """
+ Generates a buffer lookup function for the right number
+ of dimensions. The function gives back a void* at the right location.
+ """
+ # _i_ndex, _s_tride
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
+ proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
+
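+# For nd=2 the generated strided macro reads, roughly (wrapped here):
+#     #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) \
+#         (type)((char*)buf + i0 * s0 + i1 * s1)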
+
+def buf_lookup_c_code(proto, defin, name, nd):
+ """
+    Similar to strided lookup, but the last dimension needs no stride
+    multiplication: for a C-contiguous buffer its stride equals the item size,
+    so the index can be added directly via typed pointer arithmetic.
+    Still we keep the same signature for now.
+ """
+ if nd == 1:
+ proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
+ else:
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
+ proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
+
+
+def buf_lookup_fortran_code(proto, defin, name, nd):
+ """
+ Like C lookup, but the first index is optimized instead.
+ """
+ if nd == 1:
+ proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
+ else:
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)])
+ proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0))
+
+
+def use_py2_buffer_functions(env):
+ env.use_utility_code(GetAndReleaseBufferUtilityCode())
+
+
+class GetAndReleaseBufferUtilityCode(object):
+ # Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
+    # For Python >= 2.6 we operate in a dual mode: the new buffer interface is
+    # used on objects that have the right tp_flags set, with emulation otherwise.
+
+ requires = None
+ is_cython_utility = False
+
+ def __init__(self):
+ pass
+
+ def __eq__(self, other):
+ return isinstance(other, GetAndReleaseBufferUtilityCode)
+
+ def __hash__(self):
+ return 24342342
+
+ def get_tree(self, **kwargs): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto_code = output['utility_code_proto']
+ env = output.module_node.scope
+ cython_scope = env.context.cython_scope
+
+ # Search all types for __getbuffer__ overloads
+ types = []
+ visited_scopes = set()
+ def find_buffer_types(scope):
+ if scope in visited_scopes:
+ return
+ visited_scopes.add(scope)
+ for m in scope.cimported_modules:
+ find_buffer_types(m)
+ for e in scope.type_entries:
+ if isinstance(e.utility_code_definition, CythonUtilityCode):
+ continue
+ t = e.type
+ if t.is_extension_type:
+ if scope is cython_scope and not e.used:
+ continue
+ release = get = None
+ for x in t.scope.pyfunc_entries:
+ if x.name == u"__getbuffer__": get = x.func_cname
+ elif x.name == u"__releasebuffer__": release = x.func_cname
+ if get:
+ types.append((t.typeptr_cname, get, release))
+
+ find_buffer_types(env)
+
+ util_code = TempitaUtilityCode.load(
+ "GetAndReleaseBuffer", from_file="Buffer.c",
+ context=dict(types=types))
+
+ proto = util_code.format_code(util_code.proto)
+ impl = util_code.format_code(
+ util_code.inject_string_constants(util_code.impl, output)[1])
+
+ proto_code.putln(proto)
+ code.putln(impl)
+
+
+def mangle_dtype_name(dtype):
+ # Use prefixes to separate user defined types from builtins
+ # (consider "typedef float unsigned_int")
+ if dtype.is_pyobject:
+ return "object"
+ elif dtype.is_ptr:
+ return "ptr"
+ else:
+ if dtype.is_typedef or dtype.is_struct_or_union:
+ prefix = "nn_"
+ else:
+ prefix = ""
+ return prefix + dtype.specialization_name()
+
+def get_type_information_cname(code, dtype, maxdepth=None):
+ """
+ Output the run-time type information (__Pyx_TypeInfo) for given dtype,
+ and return the name of the type info struct.
+
+ Structs with two floats of the same size are encoded as complex numbers.
+    One can distinguish complex numbers declared as a struct from those with
+    native encoding by checking whether the 'fields' field of the type info
+    is filled in.
+ """
+ namesuffix = mangle_dtype_name(dtype)
+ name = "__Pyx_TypeInfo_%s" % namesuffix
+ structinfo_name = "__Pyx_StructFields_%s" % namesuffix
+
+ if dtype.is_error: return "<error>"
+
+ # It's critical that walking the type info doesn't use more stack
+ # depth than dtype.struct_nesting_depth() returns, so use an assertion for this
+ if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
+ if maxdepth <= 0:
+ assert False
+
+ if name not in code.globalstate.utility_codes:
+ code.globalstate.utility_codes.add(name)
+ typecode = code.globalstate['typeinfo']
+
+ arraysizes = []
+ if dtype.is_array:
+ while dtype.is_array:
+ arraysizes.append(dtype.size)
+ dtype = dtype.base_type
+
+ complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
+
+ declcode = dtype.empty_declaration_code()
+ if dtype.is_simple_buffer_dtype():
+ structinfo_name = "NULL"
+ elif dtype.is_struct:
+ struct_scope = dtype.scope
+ if dtype.is_const:
+ struct_scope = struct_scope.const_base_type_scope
+ # Must pre-call all used types in order not to recurse during utility code writing.
+ fields = struct_scope.var_entries
+ assert len(fields) > 0
+ types = [get_type_information_cname(code, f.type, maxdepth - 1)
+ for f in fields]
+ typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
+ for f, typeinfo in zip(fields, types):
+ typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
+ (typeinfo, f.name, dtype.empty_declaration_code(), f.cname), safe=True)
+ typecode.putln(' {NULL, NULL, 0}', safe=True)
+ typecode.putln("};", safe=True)
+ else:
+ assert False
+
+ rep = str(dtype)
+
+ flags = "0"
+ is_unsigned = "0"
+ if dtype is PyrexTypes.c_char_type:
+ is_unsigned = "IS_UNSIGNED(%s)" % declcode
+ typegroup = "'H'"
+ elif dtype.is_int:
+ is_unsigned = "IS_UNSIGNED(%s)" % declcode
+ typegroup = "%s ? 'U' : 'I'" % is_unsigned
+ elif complex_possible or dtype.is_complex:
+ typegroup = "'C'"
+ elif dtype.is_float:
+ typegroup = "'R'"
+ elif dtype.is_struct:
+ typegroup = "'S'"
+ if dtype.packed:
+ flags = "__PYX_BUF_FLAGS_PACKED_STRUCT"
+ elif dtype.is_pyobject:
+ typegroup = "'O'"
+ else:
+ assert False, dtype
+
+ typeinfo = ('static __Pyx_TypeInfo %s = '
+ '{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };')
+ tup = (name, rep, structinfo_name, declcode,
+ ', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
+ typegroup, is_unsigned, flags)
+ typecode.putln(typeinfo % tup, safe=True)
+
+ return name
+
+def load_buffer_utility(util_code_name, context=None, **kwargs):
+ if context is None:
+ return UtilityCode.load(util_code_name, "Buffer.c", **kwargs)
+ else:
+ return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
+
+context = dict(max_dims=Options.buffer_max_dims)
+buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
+buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
+
+# Utility function to set the right exception
+# The caller should immediately goto_error
+raise_indexerror_code = load_buffer_utility("BufferIndexError")
+raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
+raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
+
+acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
+buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
+
+# See utility code BufferFormatFromTypeInfo
+_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
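For context, a sketch of the buffer syntax this module compiles (hypothetical .pyx code; ndim and mode are the options validated by analyse_buffer_options above):

    # cython: boundscheck=False, wraparound=False
    import numpy as np
    cimport numpy as np

    def total(np.ndarray[np.int64_t, ndim=2, mode='c'] arr):
        # The buffer is acquired on entry (put_acquire_arg_buffer); each
        # arr[i, j] goes through put_buffer_lookup_code, here using the
        # __Pyx_BufPtrCContig2d lookup because mode='c'.
        cdef Py_ssize_t i, j
        cdef np.int64_t s = 0
        for i in range(arr.shape[0]):
            for j in range(arr.shape[1]):
                s += arr[i, j]
        return s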
diff --git a/contrib/tools/cython/Cython/Compiler/Builtin.py b/contrib/tools/cython/Cython/Compiler/Builtin.py
new file mode 100644
index 0000000000..e0d203ae02
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Builtin.py
@@ -0,0 +1,444 @@
+#
+# Builtin Definitions
+#
+
+from __future__ import absolute_import
+
+from .Symtab import BuiltinScope, StructOrUnionScope
+from .Code import UtilityCode
+from .TypeSlots import Signature
+from . import PyrexTypes
+from . import Options
+
+
+# C-level implementations of builtin types, functions and methods
+
+iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
+getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
+getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
+pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
+pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
+globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
+
+builtin_utility_code = {
+ 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
+}
+
+
+# mapping from builtins to their C-level equivalents
+
+class _BuiltinOverride(object):
+ def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
+ utility_code=None, sig=None, func_type=None,
+ is_strict_signature=False, builtin_return_type=None):
+ self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
+ self.args, self.ret_type = args, ret_type
+ self.func_type, self.sig = func_type, sig
+ self.builtin_return_type = builtin_return_type
+ self.is_strict_signature = is_strict_signature
+ self.utility_code = utility_code
+
+ def build_func_type(self, sig=None, self_arg=None):
+ if sig is None:
+ sig = Signature(self.args, self.ret_type)
+ sig.exception_check = False # not needed for the current builtins
+ func_type = sig.function_type(self_arg)
+ if self.is_strict_signature:
+ func_type.is_strict_signature = True
+ if self.builtin_return_type:
+ func_type.return_type = builtin_types[self.builtin_return_type]
+ return func_type
+
+
+class BuiltinAttribute(object):
+ def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
+ self.py_name = py_name
+ self.cname = cname or py_name
+ self.field_type_name = field_type_name # can't do the lookup before the type is declared!
+ self.field_type = field_type
+
+ def declare_in_type(self, self_type):
+ if self.field_type_name is not None:
+ # lazy type lookup
+ field_type = builtin_scope.lookup(self.field_type_name).type
+ else:
+ field_type = self.field_type or PyrexTypes.py_object_type
+ entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
+ entry.is_variable = True
+
+
+class BuiltinFunction(_BuiltinOverride):
+ def declare_in_scope(self, scope):
+ func_type, sig = self.func_type, self.sig
+ if func_type is None:
+ func_type = self.build_func_type(sig)
+ scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
+ self.py_equiv, self.utility_code)
+
+
+class BuiltinMethod(_BuiltinOverride):
+ def declare_in_type(self, self_type):
+ method_type, sig = self.func_type, self.sig
+ if method_type is None:
+ # override 'self' type (first argument)
+ self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
+ self_arg.not_none = True
+ self_arg.accept_builtin_subtypes = True
+ method_type = self.build_func_type(sig, self_arg)
+ self_type.scope.declare_builtin_cfunction(
+ self.py_name, method_type, self.cname, utility_code=self.utility_code)
+
+
+builtin_function_table = [
+ # name, args, return, C API func, py equiv = "*"
+ BuiltinFunction('abs', "d", "d", "fabs",
+ is_strict_signature = True),
+ BuiltinFunction('abs', "f", "f", "fabsf",
+ is_strict_signature = True),
+ BuiltinFunction('abs', "i", "i", "abs",
+ is_strict_signature = True),
+ BuiltinFunction('abs', "l", "l", "labs",
+ is_strict_signature = True),
+ BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
+ utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_longlong_type, [
+ PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
+ ],
+ is_strict_signature = True, nogil=True)),
+ ] + list(
+ BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
+ func_type = PyrexTypes.CFuncType(
+ t,
+ [PyrexTypes.CFuncTypeArg("arg", t, None)],
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
+ ) + list(
+ BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
+ func_type = PyrexTypes.CFuncType(
+ t.real_type, [
+ PyrexTypes.CFuncTypeArg("arg", t, None)
+ ],
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_float_complex_type,
+ PyrexTypes.c_double_complex_type,
+ PyrexTypes.c_longdouble_complex_type)
+ ) + [
+ BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
+ utility_code=UtilityCode.load("py_abs", "Builtins.c")),
+ #('all', "", "", ""),
+ #('any', "", "", ""),
+ #('ascii', "", "", ""),
+ #('bin', "", "", ""),
+ BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
+ utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
+ #('chr', "", "", ""),
+ #('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
+ #('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
+ BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
+ BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
+ BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
+ BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
+ utility_code = pyexec_globals_utility_code),
+ BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
+ utility_code = pyexec_utility_code),
+ BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
+ utility_code = pyexec_utility_code),
+ #('eval', "", "", ""),
+ #('execfile', "", "", ""),
+ #('filter', "", "", ""),
+ BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
+ utility_code=getattr3_utility_code), # Pyrex legacy
+ BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
+ utility_code=getattr3_utility_code),
+ BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
+ utility_code=getattr_utility_code),
+ BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
+ utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
+ BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
+ #('hex', "", "", ""),
+ #('id', "", "", ""),
+ #('input', "", "", ""),
+ BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
+ utility_code = UtilityCode.load("Intern", "Builtins.c")),
+ BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
+ BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
+ BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
+ BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
+ BuiltinFunction('len', "O", "z", "PyObject_Length"),
+ BuiltinFunction('locals', "", "O", "__pyx_locals"),
+ #('map', "", "", ""),
+ #('max', "", "", ""),
+ #('min', "", "", ""),
+ BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
+ utility_code = iter_next_utility_code), # not available in Py2 => implemented here
+ BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
+ utility_code = iter_next_utility_code), # not available in Py2 => implemented here
+ #('oct', "", "", ""),
+ #('open', "ss", "O", "PyFile_FromString"), # not in Py3
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_long_cast",
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
+ is_strict_signature=True))
+ for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type]
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_uchar_cast",
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
+ is_strict_signature=True))
+ for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type]
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord",
+ utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"),
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_long_type, [
+ PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None)
+ ],
+ exception_value="(long)(Py_UCS4)-1")),
+ BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
+ BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
+ utility_code = UtilityCode.load("pow2", "Builtins.c")),
+ #('range', "", "", ""),
+ #('raw_input', "", "", ""),
+ #('reduce', "", "", ""),
+ BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
+ BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1
+ #('round', "", "", ""),
+ BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
+ #('sum', "", "", ""),
+ #('sorted', "", "", ""),
+ #('type', "O", "O", "PyObject_Type"),
+ #('unichr', "", "", ""),
+ #('unicode', "", "", ""),
+ #('vars', "", "", ""),
+ #('zip', "", "", ""),
+ # Can't do these easily until we have builtin type entries.
+ #('typecheck', "OO", "i", "PyObject_TypeCheck", False),
+ #('issubtype', "OO", "i", "PyType_IsSubtype", False),
+
+ # Used by the namespace append optimization.
+ BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),
+
+ # This is conditionally looked up based on a compiler directive.
+ BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals",
+ utility_code=globals_utility_code),
+]
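+# How to read the table above (a sketch, not part of the original module):
+# each BuiltinFunction maps a Python-level call onto a C/C-API function via
+# signature codes (here "O" appears to mean a Python object, "z" a
+# Py_ssize_t, "b" a bint, "h" a Py_hash_t). For example:
+#
+#     len(x)   # compiles to PyObject_Length(x)  -> Py_ssize_t
+#     hash(x)  # compiles to PyObject_Hash(x)    -> Py_hash_t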
+
+
+# Builtin types
+# bool
+# buffer
+# classmethod
+# dict
+# enumerate
+# file
+# float
+# int
+# list
+# long
+# object
+# property
+# slice
+# staticmethod
+# super
+# str
+# tuple
+# type
+# xrange
+
+builtin_types_table = [
+
+ ("type", "PyType_Type", []),
+
+# This conflicts with the C++ bool type, and unfortunately
+# C++ is too liberal about PyObject* <-> bool conversions,
+# resulting in unintuitive runtime behavior and segfaults.
+# ("bool", "PyBool_Type", []),
+
+ ("int", "PyInt_Type", []),
+ ("long", "PyLong_Type", []),
+ ("float", "PyFloat_Type", []),
+
+ ("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
+ BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
+ BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
+ ]),
+
+ ("basestring", "PyBaseString_Type", [
+ BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join",
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ ]),
+ ("bytearray", "PyByteArray_Type", [
+ ]),
+ ("bytes", "PyBytes_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join",
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ ]),
+ ("str", "PyString_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
+ builtin_return_type='basestring',
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ ]),
+ ("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"),
+ BuiltinMethod("join", "TO", "T", "PyUnicode_Join"),
+ ]),
+
+ ("tuple", "PyTuple_Type", []),
+
+ ("list", "PyList_Type", [BuiltinMethod("insert", "TzO", "r", "PyList_Insert"),
+ BuiltinMethod("reverse", "T", "r", "PyList_Reverse"),
+ BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append",
+ utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
+ BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend",
+ utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
+ ]),
+
+ ("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"),
+ BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"),
+ BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items",
+ utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
+ BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys",
+ utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
+ BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values",
+ utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
+ BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems",
+ utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
+ BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys",
+ utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
+ BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues",
+ utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
+ BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems",
+ utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
+ BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys",
+ utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
+ BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues",
+ utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
+ BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear",
+ utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
+ BuiltinMethod("copy", "T", "T", "PyDict_Copy")]),
+
+ ("slice", "PySlice_Type", [BuiltinAttribute('start'),
+ BuiltinAttribute('stop'),
+ BuiltinAttribute('step'),
+ ]),
+# ("file", "PyFile_Type", []), # not in Py3
+
+ ("set", "PySet_Type", [BuiltinMethod("clear", "T", "r", "PySet_Clear"),
+ # discard() and remove() have a special treatment for unhashable values
+ BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
+ utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
+ BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
+ utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
+ # update is actually variadic (see Github issue #1645)
+# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
+# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
+ BuiltinMethod("add", "TO", "r", "PySet_Add"),
+ BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
+ ("frozenset", "PyFrozenSet_Type", []),
+ ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
+ ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
+]
+
+
+types_that_construct_their_instance = set([
+ # some builtin types do not always return an instance of
+ # themselves - these do:
+ 'type', 'bool', 'long', 'float', 'complex',
+ 'bytes', 'unicode', 'bytearray',
+ 'tuple', 'list', 'dict', 'set', 'frozenset'
+ # 'str', # only in Py3.x
+ # 'file', # only in Py2.x
+])
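+# This set matters for return-type inference: a call like list(x) can safely
+# be given the result type 'list', whereas e.g. 'int' is missing above,
+# presumably because int(x) may return a 'long' on Python 2.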
+
+
+builtin_structs_table = [
+ ('Py_buffer', 'Py_buffer',
+ [("buf", PyrexTypes.c_void_ptr_type),
+ ("obj", PyrexTypes.py_object_type),
+ ("len", PyrexTypes.c_py_ssize_t_type),
+ ("itemsize", PyrexTypes.c_py_ssize_t_type),
+ ("readonly", PyrexTypes.c_bint_type),
+ ("ndim", PyrexTypes.c_int_type),
+ ("format", PyrexTypes.c_char_ptr_type),
+ ("shape", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("strides", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
+ ("internal", PyrexTypes.c_void_ptr_type),
+ ]),
+ ('Py_complex', 'Py_complex',
+ [('real', PyrexTypes.c_double_type),
+ ('imag', PyrexTypes.c_double_type),
+ ])
+]
+
+# set up builtin scope
+
+builtin_scope = BuiltinScope()
+
+def init_builtin_funcs():
+ for bf in builtin_function_table:
+ bf.declare_in_scope(builtin_scope)
+
+builtin_types = {}
+
+def init_builtin_types():
+ global builtin_types
+ for name, cname, methods in builtin_types_table:
+ utility = builtin_utility_code.get(name)
+ if name == 'frozenset':
+ objstruct_cname = 'PySetObject'
+ elif name == 'bytearray':
+ objstruct_cname = 'PyByteArrayObject'
+ elif name == 'bool':
+ objstruct_cname = None
+ elif name == 'Exception':
+ objstruct_cname = "PyBaseExceptionObject"
+ elif name == 'StopAsyncIteration':
+ objstruct_cname = "PyBaseExceptionObject"
+ else:
+ objstruct_cname = 'Py%sObject' % name.capitalize()
+ the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
+ builtin_types[name] = the_type
+ for method in methods:
+ method.declare_in_type(the_type)
+
+def init_builtin_structs():
+ for name, cname, attribute_types in builtin_structs_table:
+ scope = StructOrUnionScope(name)
+ for attribute_name, attribute_type in attribute_types:
+ scope.declare_var(attribute_name, attribute_type, None,
+ attribute_name, allow_pyobject=True)
+ builtin_scope.declare_struct_or_union(
+ name, "struct", scope, 1, None, cname = cname)
+
+
+def init_builtins():
+ init_builtin_structs()
+ init_builtin_types()
+ init_builtin_funcs()
+
+ builtin_scope.declare_var(
+ '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
+ pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True)
+
+ global list_type, tuple_type, dict_type, set_type, frozenset_type
+ global bytes_type, str_type, unicode_type, basestring_type, slice_type
+ global float_type, bool_type, type_type, complex_type, bytearray_type
+ type_type = builtin_scope.lookup('type').type
+ list_type = builtin_scope.lookup('list').type
+ tuple_type = builtin_scope.lookup('tuple').type
+ dict_type = builtin_scope.lookup('dict').type
+ set_type = builtin_scope.lookup('set').type
+ frozenset_type = builtin_scope.lookup('frozenset').type
+ slice_type = builtin_scope.lookup('slice').type
+ bytes_type = builtin_scope.lookup('bytes').type
+ str_type = builtin_scope.lookup('str').type
+ unicode_type = builtin_scope.lookup('unicode').type
+ basestring_type = builtin_scope.lookup('basestring').type
+ bytearray_type = builtin_scope.lookup('bytearray').type
+ float_type = builtin_scope.lookup('float').type
+ bool_type = builtin_scope.lookup('bool').type
+ complex_type = builtin_scope.lookup('complex').type
+
+
+init_builtins()
diff --git a/contrib/tools/cython/Cython/Compiler/CmdLine.py b/contrib/tools/cython/Cython/Compiler/CmdLine.py
new file mode 100644
index 0000000000..db36a41f8f
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/CmdLine.py
@@ -0,0 +1,244 @@
+#
+# Cython - Command Line Parsing
+#
+
+from __future__ import absolute_import
+
+import os
+import sys
+from . import Options
+
+usage = """\
+Cython (http://cython.org) is a compiler for code written in the
+Cython language. Cython is based on Pyrex by Greg Ewing.
+
+Usage: cython [options] sourcefile.{pyx,py} ...
+
+Options:
+ -V, --version Display version number of cython compiler
+ -l, --create-listing Write error messages to a listing file
+ -I, --include-dir <directory> Search for include files in named directory
+ (multiple include directories are allowed).
+ -o, --output-file <filename> Specify name of generated C file
+ -t, --timestamps Only compile newer source files
+ -f, --force Compile all source files (overrides implied -t)
+ -v, --verbose Be verbose, print file names on multiple compilation
+ -p, --embed-positions If specified, the positions in Cython files of each
+ function definition are embedded in its docstring.
+ --cleanup <level> Release interned objects on Python exit, for memory debugging.
+ Level indicates aggressiveness, default 0 releases nothing.
+ -w, --working <directory> Sets the working directory for Cython (the directory modules
+ are searched from)
+ --gdb Output debug information for cygdb
+ --gdb-outdir <directory> Specify gdb debug information output directory. Implies --gdb.
+
+ -D, --no-docstrings Strip docstrings from the compiled module.
+ -a, --annotate Produce a colorized HTML version of the source.
+ --annotate-coverage <cov.xml> Annotate and include coverage information from cov.xml.
+ --line-directives Produce #line directives pointing to the .pyx source
+ --cplus Output a C++ rather than C file.
+ --embed[=<method_name>] Generate a main() function that embeds the Python interpreter.
+ -2 Compile based on Python-2 syntax and code semantics.
+ -3 Compile based on Python-3 syntax and code semantics.
+ --3str Compile based on Python-3 syntax and code semantics without
+ assuming unicode by default for string literals under Python 2.
+ --lenient Change some compile time errors to runtime errors to
+ improve Python compatibility
+ --capi-reexport-cincludes Add cincluded headers to any auto-generated header files.
+ --fast-fail Abort the compilation on the first error
+ --warning-errors, -Werror Make all warnings into errors
+ --warning-extra, -Wextra Enable extra warnings
+ -X, --directive <name>=<value>[,<name>=<value>,...] Overrides a compiler directive
+ -E, --compile-time-env <name>=<value>[,<name>=<value>,...] Provides a compile-time environment, like DEF would do.
+ --module-name Fully qualified module name. If not given, it is deduced from the
+ import path if the source file is in a package, or equals the
+ filename otherwise.
+ -M, --depfile Produce depfiles for the sources
+"""
+
+
+# The following experimental options are supported only on MacOSX:
+# -C, --compile Compile generated .c file to .o file
+# --link Link .o file to produce extension module (implies -C)
+# -+, --cplus Use C++ compiler for compiling and linking
+# Additional .o files to link may be supplied when using -X."""
+
+def bad_usage():
+ sys.stderr.write(usage)
+ sys.exit(1)
+
+def parse_command_line(args):
+ from .Main import CompilationOptions, default_options
+
+ pending_arg = []
+
+ def pop_arg():
+ if not args or pending_arg:
+ bad_usage()
+ if '=' in args[0] and args[0].startswith('--'): # allow "--long-option=xyz"
+ name, value = args.pop(0).split('=', 1)
+ pending_arg.append(value)
+ return name
+ return args.pop(0)
+
+ def pop_value(default=None):
+ if pending_arg:
+ return pending_arg.pop()
+ elif default is not None:
+ return default
+ elif not args:
+ bad_usage()
+ return args.pop(0)
+
+ def get_param(option):
+ tail = option[2:]
+ if tail:
+ return tail
+ else:
+ return pop_arg()
+
+ options = CompilationOptions(default_options)
+ sources = []
+ while args:
+ if args[0].startswith("-"):
+ option = pop_arg()
+ if option in ("-V", "--version"):
+ options.show_version = 1
+ elif option in ("-l", "--create-listing"):
+ options.use_listing_file = 1
+ elif option in ("-+", "--cplus"):
+ options.cplus = 1
+ elif option == "--embed":
+ Options.embed = pop_value("main")
+ elif option.startswith("-I"):
+ options.include_path.append(get_param(option))
+ elif option == "--include-dir":
+ options.include_path.append(pop_value())
+ elif option in ("-w", "--working"):
+ options.working_path = pop_value()
+ elif option in ("-o", "--output-file"):
+ options.output_file = pop_value()
+ elif option in ("-t", "--timestamps"):
+ options.timestamps = 1
+ elif option in ("-f", "--force"):
+ options.timestamps = 0
+ elif option in ("-v", "--verbose"):
+ options.verbose += 1
+ elif option in ("-p", "--embed-positions"):
+ Options.embed_pos_in_docstring = 1
+ elif option in ("-z", "--pre-import"):
+ Options.pre_import = pop_value()
+ elif option == "--cleanup":
+ Options.generate_cleanup_code = int(pop_value())
+ elif option in ("-D", "--no-docstrings"):
+ Options.docstrings = False
+ elif option in ("-a", "--annotate"):
+ Options.annotate = True
+ elif option == "--annotate-coverage":
+ Options.annotate = True
+ Options.annotate_coverage_xml = pop_value()
+ elif option == "--convert-range":
+ Options.convert_range = True
+ elif option == "--line-directives":
+ options.emit_linenums = True
+ elif option == "--no-c-in-traceback":
+ options.c_line_in_traceback = False
+ elif option == "--gdb":
+ options.gdb_debug = True
+ options.output_dir = os.curdir
+ elif option == "--gdb-outdir":
+ options.gdb_debug = True
+ options.output_dir = pop_value()
+ elif option == "--lenient":
+ Options.error_on_unknown_names = False
+ Options.error_on_uninitialized = False
+ elif option == '--init-suffix':
+ options.init_suffix = pop_arg()
+ elif option == '--source-root':
+ Options.source_root = pop_arg()
+ elif option == '-2':
+ options.language_level = 2
+ elif option == '-3':
+ options.language_level = 3
+ elif option == '--3str':
+ options.language_level = '3str'
+ elif option == "--capi-reexport-cincludes":
+ options.capi_reexport_cincludes = True
+ elif option == "--fast-fail":
+ Options.fast_fail = True
+ elif option == "--cimport-from-pyx":
+ Options.cimport_from_pyx = True
+ elif option in ('-Werror', '--warning-errors'):
+ Options.warning_errors = True
+ elif option in ('-Wextra', '--warning-extra'):
+ options.compiler_directives.update(Options.extra_warnings)
+ elif option == "--old-style-globals":
+ Options.old_style_globals = True
+ elif option == "--directive" or option.startswith('-X'):
+ if option.startswith('-X') and option[2:].strip():
+ x_args = option[2:]
+ else:
+ x_args = pop_value()
+ try:
+ options.compiler_directives = Options.parse_directive_list(
+ x_args, relaxed_bool=True,
+ current_settings=options.compiler_directives)
+ except ValueError as e:
+ sys.stderr.write("Error in compiler directive: %s\n" % e.args[0])
+ sys.exit(1)
+ elif option == "--compile-time-env" or option.startswith('-E'):
+ if option.startswith('-E') and option[2:].strip():
+ x_args = option[2:]
+ else:
+ x_args = pop_value()
+ try:
+ options.compile_time_env = Options.parse_compile_time_env(
+ x_args, current_settings=options.compile_time_env)
+ except ValueError as e:
+ sys.stderr.write("Error in compile-time-env: %s\n" % e.args[0])
+ sys.exit(1)
+ elif option == "--module-name":
+ options.module_name = pop_value()
+ elif option in ('-M', '--depfile'):
+ options.depfile = True
+ elif option.startswith('--debug'):
+ option = option[2:].replace('-', '_')
+ from . import DebugFlags
+ if option in dir(DebugFlags):
+ setattr(DebugFlags, option, True)
+ else:
+ sys.stderr.write("Unknown debug flag: %s\n" % option)
+ bad_usage()
+ elif option in ('-h', '--help'):
+ sys.stdout.write(usage)
+ sys.exit(0)
+ else:
+ sys.stderr.write(usage)
+ sys.stderr.write("Unknown compiler flag: %s\n" % option)
+ sys.exit(1)
+ else:
+ sources.append(pop_arg())
+
+ if pending_arg:
+ bad_usage()
+
+ if options.use_listing_file and len(sources) > 1:
+ sys.stderr.write(
+ "cython: Only one source file allowed when using -l\n")
+ sys.exit(1)
+ if len(sources) == 0 and not options.show_version:
+ bad_usage()
+ if Options.embed and len(sources) > 1:
+ sys.stderr.write(
+ "cython: Only one source file allowed when using --embed\n")
+ sys.exit(1)
+ if options.module_name:
+ if options.timestamps:
+ sys.stderr.write(
+ "cython: Cannot use --module-name with --timestamps\n")
+ sys.exit(1)
+ if len(sources) > 1:
+ sys.stderr.write(
+ "cython: Only one source file allowed when using --module-name\n")
+ sys.exit(1)
+ return options, sources
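+# Sketch of the expected entry-point usage (illustrative only):
+#
+#     options, sources = parse_command_line(sys.argv[1:])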
diff --git a/contrib/tools/cython/Cython/Compiler/Code.pxd b/contrib/tools/cython/Cython/Compiler/Code.pxd
new file mode 100644
index 0000000000..acad0c1cf4
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Code.pxd
@@ -0,0 +1,124 @@
+
+from __future__ import absolute_import
+
+cimport cython
+from ..StringIOTree cimport StringIOTree
+
+
+cdef class UtilityCodeBase(object):
+ cpdef format_code(self, code_string, replace_empty_lines=*)
+
+
+cdef class UtilityCode(UtilityCodeBase):
+ cdef public object name
+ cdef public object proto
+ cdef public object impl
+ cdef public object init
+ cdef public object cleanup
+ cdef public object proto_block
+ cdef public object requires
+ cdef public dict _cache
+ cdef public list specialize_list
+ cdef public object file
+
+ cpdef none_or_sub(self, s, context)
+
+
+cdef class FunctionState:
+ cdef public set names_taken
+ cdef public object owner
+ cdef public object scope
+
+ cdef public object error_label
+ cdef public size_t label_counter
+ cdef public set labels_used
+ cdef public object return_label
+ cdef public object continue_label
+ cdef public object break_label
+ cdef public list yield_labels
+
+ cdef public object return_from_error_cleanup_label # not used in __init__ ?
+
+ cdef public object exc_vars
+ cdef public object current_except
+ cdef public bint in_try_finally
+ cdef public bint can_trace
+ cdef public bint gil_owned
+
+ cdef public list temps_allocated
+ cdef public dict temps_free
+ cdef public dict temps_used_type
+ cdef public set zombie_temps
+ cdef public size_t temp_counter
+ cdef public list collect_temps_stack
+
+ cdef public object closure_temps
+ cdef public bint should_declare_error_indicator
+ cdef public bint uses_error_indicator
+
+ @cython.locals(n=size_t)
+ cpdef new_label(self, name=*)
+ cpdef tuple get_loop_labels(self)
+ cpdef set_loop_labels(self, labels)
+ cpdef tuple get_all_labels(self)
+ cpdef set_all_labels(self, labels)
+ cpdef start_collecting_temps(self)
+ cpdef stop_collecting_temps(self)
+
+ cpdef list temps_in_use(self)
+
+cdef class IntConst:
+ cdef public object cname
+ cdef public object value
+ cdef public bint is_long
+
+cdef class PyObjectConst:
+ cdef public object cname
+ cdef public object type
+
+cdef class StringConst:
+ cdef public object cname
+ cdef public object text
+ cdef public object escaped_value
+ cdef public dict py_strings
+ cdef public list py_versions
+
+ @cython.locals(intern=bint, is_str=bint, is_unicode=bint)
+ cpdef get_py_string_const(self, encoding, identifier=*, is_str=*, py3str_cstring=*)
+
+## cdef class PyStringConst:
+## cdef public object cname
+## cdef public object encoding
+## cdef public bint is_str
+## cdef public bint is_unicode
+## cdef public bint intern
+
+#class GlobalState(object):
+
+#def funccontext_property(name):
+
+cdef class CCodeWriter(object):
+ cdef readonly StringIOTree buffer
+ cdef readonly list pyclass_stack
+ cdef readonly object globalstate
+ cdef readonly object funcstate
+ cdef object code_config
+ cdef object last_pos
+ cdef object last_marked_pos
+ cdef Py_ssize_t level
+ cdef public Py_ssize_t call_level # debug-only, see Nodes.py
+ cdef bint bol
+
+ cpdef write(self, s)
+ cpdef put(self, code)
+ cpdef put_safe(self, code)
+ cpdef putln(self, code=*, bint safe=*)
+ @cython.final
+ cdef increase_indent(self)
+ @cython.final
+ cdef decrease_indent(self)
+
+
+cdef class PyrexCodeWriter:
+ cdef public object f
+ cdef public Py_ssize_t level
diff --git a/contrib/tools/cython/Cython/Compiler/Code.py b/contrib/tools/cython/Cython/Compiler/Code.py
new file mode 100644
index 0000000000..d0b4756e59
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Code.py
@@ -0,0 +1,2597 @@
+# cython: language_level = 2
+# cython: auto_pickle=False
+#
+# Code output module
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(os=object, re=object, operator=object, textwrap=object,
+ Template=object, Naming=object, Options=object, StringEncoding=object,
+ Utils=object, SourceDescriptor=object, StringIOTree=object,
+ DebugFlags=object, basestring=object, defaultdict=object,
+ closing=object, partial=object)
+
+import os
+import re
+import shutil
+import sys
+import operator
+import textwrap
+from string import Template
+from functools import partial
+from contextlib import closing
+from collections import defaultdict
+
+try:
+ import hashlib
+except ImportError:
+ import md5 as hashlib
+
+from . import Naming
+from . import Options
+from . import DebugFlags
+from . import StringEncoding
+from . import Version
+from .. import Utils
+from .Scanning import SourceDescriptor
+from ..StringIOTree import StringIOTree
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ from builtins import str as basestring
+
+KEYWORDS_MUST_BE_BYTES = sys.version_info < (2, 7)
+
+
+non_portable_builtins_map = {
+ # builtins that have different names in different Python versions
+ 'bytes' : ('PY_MAJOR_VERSION < 3', 'str'),
+ 'unicode' : ('PY_MAJOR_VERSION >= 3', 'str'),
+ 'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'),
+ 'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'),
+ 'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'),
+}
+
+ctypedef_builtins_map = {
+ # types of builtins in "ctypedef class" statements which we don't
+ # import either because the names conflict with C types or because
+ # the type simply is not exposed.
+ 'py_int' : '&PyInt_Type',
+ 'py_long' : '&PyLong_Type',
+ 'py_float' : '&PyFloat_Type',
+ 'wrapper_descriptor' : '&PyWrapperDescr_Type',
+}
+
+basicsize_builtins_map = {
+ # builtins whose type has a different tp_basicsize than sizeof(...)
+ 'PyTypeObject': 'PyHeapTypeObject',
+}
+
+uncachable_builtins = [
+ # Global/builtin names that cannot be cached because they may or may not
+ # be available at import time, for various reasons:
+ ## - Py3.7+
+ 'breakpoint', # might deserve an implementation in Cython
+ ## - Py3.4+
+ '__loader__',
+ '__spec__',
+ ## - Py3+
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'ModuleNotFoundError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ResourceWarning',
+ #'StopAsyncIteration', # backported
+ 'TimeoutError',
+ '__build_class__',
+ 'ascii', # might deserve an implementation in Cython
+ #'exec', # implemented in Cython
+ ## - Py2.7+
+ 'memoryview',
+ ## - platform specific
+ 'WindowsError',
+ ## - others
+ '_', # e.g. used by gettext
+]
+
+special_py_methods = set([
+ '__cinit__', '__dealloc__', '__richcmp__', '__next__',
+ '__await__', '__aiter__', '__anext__',
+ '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__',
+ '__getcharbuffer__', '__getbuffer__', '__releasebuffer__'
+])
+
+modifier_output_mapper = {
+ 'inline': 'CYTHON_INLINE'
+}.get
+
+
+class IncludeCode(object):
+ """
+ An include file and/or verbatim C code to be included in the
+ generated sources.
+ """
+ # attributes:
+ #
+ # pieces {order: unicode}: pieces of C code to be generated.
+ # For the included file, the key "order" is zero.
+ # For verbatim include code, the "order" is the "order"
+ # attribute of the original IncludeCode where this piece
+ # of C code was first added. This is needed to prevent
+ # duplication if the same include code is found through
+ # multiple cimports.
+ # location int: where to put this include in the C sources, one
+ # of the constants INITIAL, EARLY, LATE
+ # order int: sorting order (automatically set by increasing counter)
+
+ # Constants for location. If the same include occurs with different
+ # locations, the earliest one takes precedence.
+ INITIAL = 0
+ EARLY = 1
+ LATE = 2
+
+ counter = 1 # Counter for "order"
+
+ def __init__(self, include=None, verbatim=None, late=True, initial=False):
+ self.order = self.counter
+ type(self).counter += 1
+ self.pieces = {}
+
+ if include:
+ if include[0] == '<' and include[-1] == '>':
+ self.pieces[0] = u'#include {0}'.format(include)
+ late = False # system include is never late
+ else:
+ self.pieces[0] = u'#include "{0}"'.format(include)
+
+ if verbatim:
+ self.pieces[self.order] = verbatim
+
+ if initial:
+ self.location = self.INITIAL
+ elif late:
+ self.location = self.LATE
+ else:
+ self.location = self.EARLY
+
+ def dict_update(self, d, key):
+ """
+ Insert `self` in dict `d` with key `key`. If that key already
+ exists, update the attributes of the existing value with `self`.
+ """
+ if key in d:
+ other = d[key]
+ other.location = min(self.location, other.location)
+ other.pieces.update(self.pieces)
+ else:
+ d[key] = self
+
+ def sortkey(self):
+ return self.order
+
+ def mainpiece(self):
+ """
+ Return the main piece of C code, corresponding to the include
+ file. If there was no include file, return None.
+ """
+ return self.pieces.get(0)
+
+ def write(self, code):
+ # Write values of self.pieces dict, sorted by the keys
+ for k in sorted(self.pieces):
+ code.putln(self.pieces[k])
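+ # Illustrative sketch of the merging behaviour (not part of the module):
+ #
+ #     a = IncludeCode(include="<math.h>")              # system include => EARLY
+ #     b = IncludeCode(include="math.h", initial=True)  # => INITIAL
+ #     d = {}
+ #     a.dict_update(d, "math.h"); b.dict_update(d, "math.h")
+ #     assert d["math.h"].location == IncludeCode.INITIAL  # earliest location wins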
+
+
+def get_utility_dir():
+ # make this a function rather than a global variable:
+ # http://trac.cython.org/cython_trac/ticket/475
+ Cython_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ return os.path.join(Cython_dir, "Utility")
+
+
+class UtilityCodeBase(object):
+ """
+ Support for loading utility code from a file.
+
+ Code sections in the file can be specified as follows:
+
+ ##### MyUtility.proto #####
+
+ [proto declarations]
+
+ ##### MyUtility.init #####
+
+ [code run at module initialization]
+
+ ##### MyUtility #####
+ #@requires: MyOtherUtility
+ #@substitute: naming
+
+ [definitions]
+
+ for prototypes and implementation respectively. For non-Python or
+ non-Cython files, forward slashes are used as the comment character
+ instead. 5 to 30 comment characters may be used on either side.
+
+ If the @cname decorator is not used and this is a CythonUtilityCode,
+ one should pass in the 'name' keyword argument to be used for name
+ mangling of such entries.
+ """
+
+ is_cython_utility = False
+ _utility_cache = {}
+
+ @classmethod
+ def _add_utility(cls, utility, type, lines, begin_lineno, tags=None):
+ if utility is None:
+ return
+
+ code = '\n'.join(lines)
+ if tags and 'substitute' in tags and tags['substitute'] == set(['naming']):
+ del tags['substitute']
+ try:
+ code = Template(code).substitute(vars(Naming))
+ except (KeyError, ValueError) as e:
+ raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % (
+ type, begin_lineno, e))
+
+ # remember correct line numbers at least until after templating
+ code = '\n' * begin_lineno + code
+
+ if type == 'proto':
+ utility[0] = code
+ elif type == 'impl':
+ utility[1] = code
+ else:
+ all_tags = utility[2]
+ if KEYWORDS_MUST_BE_BYTES:
+ type = type.encode('ASCII')
+ all_tags[type] = code
+
+ if tags:
+ all_tags = utility[2]
+ for name, values in tags.items():
+ if KEYWORDS_MUST_BE_BYTES:
+ name = name.encode('ASCII')
+ all_tags.setdefault(name, set()).update(values)
+
+ @classmethod
+ def load_utilities_from_file(cls, path):
+ utilities = cls._utility_cache.get(path)
+ if utilities:
+ return utilities
+
+ filename = os.path.join(get_utility_dir(), path)
+ _, ext = os.path.splitext(path)
+ if ext in ('.pyx', '.py', '.pxd', '.pxi'):
+ comment = '#'
+ strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
+ rstrip = StringEncoding._unicode.rstrip
+ else:
+ comment = '/'
+ strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '')
+ rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1')
+ match_special = re.compile(
+ (r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|'
+ r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') %
+ {'C': comment}).match
+ match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match
+
+ with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
+ all_lines = f.readlines()
+
+ utilities = defaultdict(lambda: [None, None, {}])
+ lines = []
+ tags = defaultdict(set)
+ utility = type = None
+ begin_lineno = 0
+
+ for lineno, line in enumerate(all_lines):
+ m = match_special(line)
+ if m:
+ if m.group('name'):
+ cls._add_utility(utility, type, lines, begin_lineno, tags)
+
+ begin_lineno = lineno + 1
+ del lines[:]
+ tags.clear()
+
+ name = m.group('name')
+ mtype = match_type(name)
+ if mtype:
+ name, type = mtype.groups()
+ else:
+ type = 'impl'
+ utility = utilities[name]
+ else:
+ tags[m.group('tag')].add(m.group('value'))
+ lines.append('') # keep line number correct
+ else:
+ lines.append(rstrip(strip_comments(line)))
+
+ if utility is None:
+ raise ValueError("Empty utility code file")
+
+ # Don't forget to add the last utility code
+ cls._add_utility(utility, type, lines, begin_lineno, tags)
+
+ utilities = dict(utilities) # un-defaultdict-ify
+ cls._utility_cache[path] = utilities
+ return utilities
+
+ @classmethod
+ def load(cls, util_code_name, from_file=None, **kwargs):
+ """
+ Load utility code from a file specified by from_file (relative to
+ Cython/Utility) and name util_code_name. If from_file is not given,
+ load it from the file util_code_name.*. There should be only one
+ file matched by this pattern.
+ """
+ if '::' in util_code_name:
+ from_file, util_code_name = util_code_name.rsplit('::', 1)
+ if not from_file:
+ utility_dir = get_utility_dir()
+ prefix = util_code_name + '.'
+ try:
+ listing = os.listdir(utility_dir)
+ except OSError:
+ # XXX the code below assumes a 'zipimport.zipimporter' instance
+ # XXX should be easy to generalize, but too lazy right now to write it
+ import zipfile
+ global __loader__
+ loader = __loader__
+ archive = loader.archive
+ with closing(zipfile.ZipFile(archive)) as fileobj:
+ listing = [os.path.basename(name)
+ for name in fileobj.namelist()
+ if os.path.join(archive, name).startswith(utility_dir)]
+ files = [filename for filename in listing
+ if filename.startswith(prefix)]
+ if not files:
+ raise ValueError("No match found for utility code " + util_code_name)
+ if len(files) > 1:
+ raise ValueError("More than one filename match found for utility code " + util_code_name)
+ from_file = files[0]
+
+ utilities = cls.load_utilities_from_file(from_file)
+ proto, impl, tags = utilities[util_code_name]
+
+ if tags:
+ orig_kwargs = kwargs.copy()
+ for name, values in tags.items():
+ if name in kwargs:
+ continue
+ # only pass lists when we have to: most arguments expect one value or None
+ if name == 'requires':
+ if orig_kwargs:
+ values = [cls.load(dep, from_file, **orig_kwargs)
+ for dep in sorted(values)]
+ else:
+ # dependencies are rarely unique, so use load_cached() when we can
+ values = [cls.load_cached(dep, from_file)
+ for dep in sorted(values)]
+ elif not values:
+ values = None
+ elif len(values) == 1:
+ values = list(values)[0]
+ kwargs[name] = values
+
+ if proto is not None:
+ kwargs['proto'] = proto
+ if impl is not None:
+ kwargs['impl'] = impl
+
+ if 'name' not in kwargs:
+ kwargs['name'] = util_code_name
+
+ if 'file' not in kwargs and from_file:
+ kwargs['file'] = from_file
+ return cls(**kwargs)
+
+ @classmethod
+ def load_cached(cls, utility_code_name, from_file=None, __cache={}):
+ """
+ Calls .load(), but using a per-type cache based on utility name and file name.
+ """
+ key = (cls, from_file, utility_code_name)
+ try:
+ return __cache[key]
+ except KeyError:
+ pass
+ code = __cache[key] = cls.load(utility_code_name, from_file)
+ return code
+
+ @classmethod
+ def load_as_string(cls, util_code_name, from_file=None, **kwargs):
+ """
+ Load a utility code as a string. Returns (proto, implementation)
+ """
+ util = cls.load(util_code_name, from_file, **kwargs)
+ proto, impl = util.proto, util.impl
+ return util.format_code(proto), util.format_code(impl)
+
+ def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub):
+ """
+ Format a code section for output.
+ """
+ if code_string:
+ code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n'
+ return code_string
+
+ def __str__(self):
+ return "<%s(%s)>" % (type(self).__name__, self.name)
+
+ def get_tree(self, **kwargs):
+ pass
+
+ def __deepcopy__(self, memodict=None):
+ # No need to deep-copy utility code since it's essentially immutable.
+ return self
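+ # Typical call sites, as seen in Builtin.py earlier in this diff:
+ #
+ #     UtilityCode.load("py_abs", "Builtins.c")
+ #     UtilityCode.load_cached("object_ord", "Builtins.c")
+ #
+ # Both resolve the named section inside Cython/Utility/Builtins.c;
+ # load_cached() additionally memoizes the result per (class, file, name).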
+
+
+class UtilityCode(UtilityCodeBase):
+ """
+ Stores utility code to add during code generation.
+
+ See GlobalState.put_utility_code.
+
+ hashes/equals by instance
+
+ proto C prototypes
+ impl implementation code
+ init code to call on module initialization
+ requires utility code dependencies
+ proto_block the place in the resulting file where the prototype should
+ end up
+ name name of the utility code (or None)
+ file filename of the utility code file this utility was loaded
+ from (or None)
+ """
+
+ def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
+ proto_block='utility_code_proto', name=None, file=None):
+ # proto_block: Which code block to dump prototype in. See GlobalState.
+ self.proto = proto
+ self.impl = impl
+ self.init = init
+ self.cleanup = cleanup
+ self.requires = requires
+ self._cache = {}
+ self.specialize_list = []
+ self.proto_block = proto_block
+ self.name = name
+ self.file = file
+
+ def __hash__(self):
+ return hash((self.proto, self.impl))
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ self_type, other_type = type(self), type(other)
+ if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)):
+ return False
+
+ self_proto = getattr(self, 'proto', None)
+ other_proto = getattr(other, 'proto', None)
+ return (self_proto, self.impl) == (other_proto, other.impl)
+
+ def none_or_sub(self, s, context):
+ """
+ Format a string in this utility code with context. If None, do nothing.
+ """
+ if s is None:
+ return None
+ return s % context
+
+ def specialize(self, pyrex_type=None, **data):
+ # Dicts aren't hashable...
+ name = self.name
+ if pyrex_type is not None:
+ data['type'] = pyrex_type.empty_declaration_code()
+ data['type_name'] = pyrex_type.specialization_name()
+ name = "%s[%s]" % (name, data['type_name'])
+ key = tuple(sorted(data.items()))
+ try:
+ return self._cache[key]
+ except KeyError:
+ if self.requires is None:
+ requires = None
+ else:
+ requires = [r.specialize(data) for r in self.requires]
+
+ s = self._cache[key] = UtilityCode(
+ self.none_or_sub(self.proto, data),
+ self.none_or_sub(self.impl, data),
+ self.none_or_sub(self.init, data),
+ self.none_or_sub(self.cleanup, data),
+ requires,
+ self.proto_block,
+ name,
+ )
+
+ self.specialize_list.append(s)
+ return s
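+ # Illustrative sketch: for a utility whose code uses %-style placeholders,
+ # e.g. proto = "static %(type)s square(%(type)s);", calling
+ # specialize(PyrexTypes.c_int_type) substitutes "int" for %(type)s and
+ # caches the copy under the name 'name[int]'.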
+
+ def inject_string_constants(self, impl, output):
+ """Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
+ """
+ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
+ return False, impl
+
+ replacements = {}
+ def externalise(matchobj):
+ key = matchobj.groups()
+ try:
+ cname = replacements[key]
+ except KeyError:
+ str_type, name = key
+ cname = replacements[key] = output.get_py_string_const(
+ StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
+ return cname
+
+ impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
+ assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
+ return True, impl
+
+ def inject_unbound_methods(self, impl, output):
+ """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname.
+ """
+ if 'CALL_UNBOUND_METHOD(' not in impl:
+ return False, impl
+
+ def externalise(matchobj):
+ type_cname, method_name, obj_cname, args = matchobj.groups()
+ args = [arg.strip() for arg in args[1:].split(',')] if args else []
+ assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
+ return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
+
+ impl = re.sub(
+ r'CALL_UNBOUND_METHOD\('
+ r'([a-zA-Z_]+),' # type cname
+ r'\s*"([^"]+)",' # method name
+ r'\s*([^),]+)' # object cname
+ r'((?:,\s*[^),]+)*)' # args*
+ r'\)', externalise, impl)
+ assert 'CALL_UNBOUND_METHOD(' not in impl
+
+ return True, impl
+
+ def wrap_c_strings(self, impl):
+ """Replace CSTRING('''xyz''') by a C compatible string
+ """
+ if 'CSTRING(' not in impl:
+ return impl
+
+ def split_string(matchobj):
+ content = matchobj.group(1).replace('"', '\042')
+ return ''.join(
+ '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1]
+ for line in content.splitlines())
+
+ impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl)
+ assert 'CSTRING(' not in impl
+ return impl
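+ # For example (a sketch):
+ #     CSTRING("""two
+ #     lines""")
+ # is rewritten into the concatenated C literal
+ #     "two\n"
+ #     "lines\n"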
+
+ def put_code(self, output):
+ if self.requires:
+ for dependency in self.requires:
+ output.use_utility_code(dependency)
+ if self.proto:
+ writer = output[self.proto_block]
+ writer.putln("/* %s.proto */" % self.name)
+ writer.put_or_include(
+ self.format_code(self.proto), '%s_proto' % self.name)
+ if self.impl:
+ impl = self.format_code(self.wrap_c_strings(self.impl))
+ is_specialised1, impl = self.inject_string_constants(impl, output)
+ is_specialised2, impl = self.inject_unbound_methods(impl, output)
+ writer = output['utility_code_def']
+ writer.putln("/* %s */" % self.name)
+ if not (is_specialised1 or is_specialised2):
+ # no module specific adaptations => can be reused
+ writer.put_or_include(impl, '%s_impl' % self.name)
+ else:
+ writer.put(impl)
+ if self.init:
+ writer = output['init_globals']
+ writer.putln("/* %s.init */" % self.name)
+ if isinstance(self.init, basestring):
+ writer.put(self.format_code(self.init))
+ else:
+ self.init(writer, output.module_pos)
+ writer.putln(writer.error_goto_if_PyErr(output.module_pos))
+ writer.putln()
+ if self.cleanup and Options.generate_cleanup_code:
+ writer = output['cleanup_globals']
+ writer.putln("/* %s.cleanup */" % self.name)
+ if isinstance(self.cleanup, basestring):
+ writer.put_or_include(
+ self.format_code(self.cleanup),
+ '%s_cleanup' % self.name)
+ else:
+ self.cleanup(writer, output.module_pos)
+
+
+def sub_tempita(s, context, file=None, name=None):
+ "Run tempita on string s with given context."
+ if not s:
+ return None
+
+ if file:
+ context['__name'] = "%s:%s" % (file, name)
+ elif name:
+ context['__name'] = name
+
+ from ..Tempita import sub
+ return sub(s, **context)
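+# Sketch of the Tempita placeholder style used by these templates:
+#
+#     sub_tempita("static int {{name}}(void);", {'name': 'f'})
+#     # -> "static int f(void);"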
+
+
+class TempitaUtilityCode(UtilityCode):
+ def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs):
+ if context is None:
+ context = {}
+ proto = sub_tempita(proto, context, file, name)
+ impl = sub_tempita(impl, context, file, name)
+ init = sub_tempita(init, context, file, name)
+ super(TempitaUtilityCode, self).__init__(
+ proto, impl, init=init, name=name, file=file, **kwargs)
+
+ @classmethod
+ def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}):
+ context_key = tuple(sorted(context.items())) if context else None
+ assert hash(context_key) is not None # raise TypeError if not hashable
+ key = (cls, from_file, utility_code_name, context_key)
+ try:
+ return __cache[key]
+ except KeyError:
+ pass
+ code = __cache[key] = cls.load(utility_code_name, from_file, context=context)
+ return code
+
+ def none_or_sub(self, s, context):
+ """
+ Format a string in this utility code with context. If None, do nothing.
+ """
+ if s is None:
+ return None
+ return sub_tempita(s, context, self.file, self.name)
+
+
+class LazyUtilityCode(UtilityCodeBase):
+ """
+ Utility code that calls a callback with the root code writer when
+ available. Useful when you only have 'env' but not 'code'.
+ """
+ __name__ = '<lazy>'
+ requires = None
+
+ def __init__(self, callback):
+ self.callback = callback
+
+ def put_code(self, globalstate):
+ utility = self.callback(globalstate.rootwriter)
+ globalstate.use_utility_code(utility)
+
+
+class FunctionState(object):
+ # return_label string function return point label
+ # error_label string error catch point label
+ # continue_label string loop continue point label
+ # break_label string loop break point label
+ # return_from_error_cleanup_label string
+ # label_counter integer counter for naming labels
+ # in_try_finally boolean inside try of try...finally
+ # exc_vars (string * 3) exception variables for reraise, or None
+ # can_trace boolean line tracing is supported in the current context
+ # scope Scope the scope object of the current function
+
+ # Not used for now, perhaps later
+ def __init__(self, owner, names_taken=set(), scope=None):
+ self.names_taken = names_taken
+ self.owner = owner
+ self.scope = scope
+
+ self.error_label = None
+ self.label_counter = 0
+ self.labels_used = set()
+ self.return_label = self.new_label()
+ self.new_error_label()
+ self.continue_label = None
+ self.break_label = None
+ self.yield_labels = []
+
+ self.in_try_finally = 0
+ self.exc_vars = None
+ self.current_except = None
+ self.can_trace = False
+ self.gil_owned = True
+
+ self.temps_allocated = [] # of (name, type, manage_ref, static)
+ self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status
+ self.temps_used_type = {} # name -> (type, manage_ref)
+ self.zombie_temps = set() # temps that must not be reused after release
+ self.temp_counter = 0
+ self.closure_temps = None
+
+ # This is used to collect temporaries, useful to find out which temps
+ # need to be privatized in parallel sections
+ self.collect_temps_stack = []
+
+ # This is used for the error indicator, which needs to be local to the
+ # function. It used to be global, which relies on the GIL being held.
+ # However, exceptions may need to be propagated through 'nogil'
+ # sections, in which case we introduce a race condition.
+ self.should_declare_error_indicator = False
+ self.uses_error_indicator = False
+
+ # safety checks
+
+ def validate_exit(self):
+ # validate that all allocated temps have been freed
+ if self.temps_allocated:
+ leftovers = self.temps_in_use()
+ if leftovers:
+ msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
+ '%s [%s]' % (name, ctype)
+ for name, ctype, is_pytemp in sorted(leftovers)]),
+ )
+ #print(msg)
+ raise RuntimeError(msg)
+
+ # labels
+
+ def new_label(self, name=None):
+ n = self.label_counter
+ self.label_counter = n + 1
+ label = "%s%d" % (Naming.label_prefix, n)
+ if name is not None:
+ label += '_' + name
+ return label
+
+ def new_yield_label(self, expr_type='yield'):
+ label = self.new_label('resume_from_%s' % expr_type)
+ num_and_label = (len(self.yield_labels) + 1, label)
+ self.yield_labels.append(num_and_label)
+ return num_and_label
+
+ def new_error_label(self):
+ old_err_lbl = self.error_label
+ self.error_label = self.new_label('error')
+ return old_err_lbl
+
+ def get_loop_labels(self):
+ return (
+ self.continue_label,
+ self.break_label)
+
+ def set_loop_labels(self, labels):
+ (self.continue_label,
+ self.break_label) = labels
+
+ def new_loop_labels(self):
+ old_labels = self.get_loop_labels()
+ self.set_loop_labels(
+ (self.new_label("continue"),
+ self.new_label("break")))
+ return old_labels
+
+ def get_all_labels(self):
+ return (
+ self.continue_label,
+ self.break_label,
+ self.return_label,
+ self.error_label)
+
+ def set_all_labels(self, labels):
+ (self.continue_label,
+ self.break_label,
+ self.return_label,
+ self.error_label) = labels
+
+ def all_new_labels(self):
+ old_labels = self.get_all_labels()
+ new_labels = []
+ for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
+ if old_label:
+ new_labels.append(self.new_label(name))
+ else:
+ new_labels.append(old_label)
+ self.set_all_labels(new_labels)
+ return old_labels
+
+ def use_label(self, lbl):
+ self.labels_used.add(lbl)
+
+ def label_used(self, lbl):
+ return lbl in self.labels_used
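+ # Sketch of the resulting label cnames (assuming the usual "__pyx_L"
+ # value of Naming.label_prefix):
+ #     new_label()        -> "__pyx_L0"   (allocated as the return label)
+ #     new_label('error') -> "__pyx_L1_error"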
+
+ # temp handling
+
+ def allocate_temp(self, type, manage_ref, static=False, reusable=True):
+ """
+ Allocates a temporary (which may create a new one or get a previously
+ allocated and released one of the same type). Type is simply registered
+ and handed back, but will usually be a PyrexType.
+
+ If type.is_pyobject, manage_ref comes into play. If manage_ref is set to
+ True, the temp will be decref-ed on return statements and in exception
+ handling clauses. Otherwise the caller has to deal with any reference
+ counting of the variable.
+
+ If not type.is_pyobject, then manage_ref will be ignored, but it
+ still has to be passed. It is recommended to pass False by convention
+ if it is known that type will never be a Python object.
+
+ static=True marks the temporary declaration with "static".
+ This is only used when allocating backing store for
+ module-level C array literals.
+
+ If reusable=False, the temp will not be reused after release.
+
+ A C string referring to the variable is returned.
+ """
+ if type.is_const and not type.is_reference:
+ type = type.const_base_type
+ elif type.is_reference and not type.is_fake_reference:
+ type = type.ref_base_type
+ elif type.is_cfunction:
+ from . import PyrexTypes
+ type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value
+ if not type.is_pyobject and not type.is_memoryviewslice:
+ # Make manage_ref canonical, so that manage_ref will always mean
+ # a decref is needed.
+ manage_ref = False
+
+ freelist = self.temps_free.get((type, manage_ref))
+ if reusable and freelist is not None and freelist[0]:
+ result = freelist[0].pop()
+ freelist[1].remove(result)
+ else:
+ while True:
+ self.temp_counter += 1
+ result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
+ if result not in self.names_taken: break
+ self.temps_allocated.append((result, type, manage_ref, static))
+ if not reusable:
+ self.zombie_temps.add(result)
+ self.temps_used_type[result] = (type, manage_ref)
+ if DebugFlags.debug_temp_code_comments:
+ self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))
+
+ if self.collect_temps_stack:
+ self.collect_temps_stack[-1].add((result, type))
+
+ return result
+
+ def release_temp(self, name):
+ """
+ Releases a temporary so that it can be reused by other code needing
+ a temp of the same type.
+ """
+ type, manage_ref = self.temps_used_type[name]
+ freelist = self.temps_free.get((type, manage_ref))
+ if freelist is None:
+ freelist = ([], set()) # keep order in list and make lookups in set fast
+ self.temps_free[(type, manage_ref)] = freelist
+ if name in freelist[1]:
+ raise RuntimeError("Temp %s freed twice!" % name)
+ if name not in self.zombie_temps:
+ freelist[0].append(name)
+ freelist[1].add(name)
+ if DebugFlags.debug_temp_code_comments:
+ self.owner.putln("/* %s released %s*/" % (
+ name, " - zombie" if name in self.zombie_temps else ""))
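+ # Sketch of the allocate/release cycle (cnames assume the usual
+ # "__pyx_t_" value of Naming.codewriter_temp_prefix):
+ #     t = fs.allocate_temp(py_object_type, manage_ref=True)  # e.g. "__pyx_t_1"
+ #     fs.release_temp(t)                                     # back on the freelist
+ #     fs.allocate_temp(py_object_type, manage_ref=True)      # reuses "__pyx_t_1"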
+
+ def temps_in_use(self):
+ """Return a list of (cname,type,manage_ref) tuples of temp names and their type
+ that are currently in use.
+ """
+ used = []
+ for name, type, manage_ref, static in self.temps_allocated:
+ freelist = self.temps_free.get((type, manage_ref))
+ if freelist is None or name not in freelist[1]:
+ used.append((name, type, manage_ref and type.is_pyobject))
+ return used
+
+ def temps_holding_reference(self):
+ """Return a list of (cname,type) tuples of temp names and their type
+ that are currently in use. This includes only temps of a
+ Python object type which owns its reference.
+ """
+ return [(name, type)
+ for name, type, manage_ref in self.temps_in_use()
+ if manage_ref and type.is_pyobject]
+
+ def all_managed_temps(self):
+ """Return a list of (cname, type) tuples of refcount-managed Python objects.
+ """
+ return [(cname, type)
+ for cname, type, manage_ref, static in self.temps_allocated
+ if manage_ref]
+
+ def all_free_managed_temps(self):
+ """Return a list of (cname, type) tuples of refcount-managed Python
+ objects that are not currently in use. This is used by
+ try-except and try-finally blocks to clean up temps in the
+ error case.
+ """
+ return sorted([ # Enforce deterministic order.
+ (cname, type)
+ for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
+ for cname in freelist[0]
+ ])
+
+ def start_collecting_temps(self):
+ """
+ Useful to find out which temps were used in a code block
+ """
+ self.collect_temps_stack.append(set())
+
+ def stop_collecting_temps(self):
+ return self.collect_temps_stack.pop()
+
+ def init_closure_temps(self, scope):
+ self.closure_temps = ClosureTempAllocator(scope)
+
+
+class NumConst(object):
+ """Global info about a Python number constant held by GlobalState.
+
+ cname string
+ value string
+ py_type string int, long, float
+ value_code string evaluation code if different from value
+ """
+
+ def __init__(self, cname, value, py_type, value_code=None):
+ self.cname = cname
+ self.value = value
+ self.py_type = py_type
+ self.value_code = value_code or value
+
+
+class PyObjectConst(object):
+ """Global info about a generic constant held by GlobalState.
+ """
+ # cname string
+ # type PyrexType
+
+ def __init__(self, cname, type):
+ self.cname = cname
+ self.type = type
+
+
+cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object,
+ replace_identifier=object, find_alphanums=object)
+possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match
+possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match
+replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub
+find_alphanums = re.compile('([a-zA-Z0-9]+)').findall
+
+class StringConst(object):
+ """Global info about a C string constant held by GlobalState.
+ """
+ # cname string
+ # text EncodedString or BytesLiteral
+ # py_strings {(identifier, encoding) : PyStringConst}
+
+ def __init__(self, cname, text, byte_string):
+ self.cname = cname
+ self.text = text
+ self.escaped_value = StringEncoding.escape_byte_string(byte_string)
+ self.py_strings = None
+ self.py_versions = []
+
+ def add_py_version(self, version):
+ if not version:
+ self.py_versions = [2, 3]
+ elif version not in self.py_versions:
+ self.py_versions.append(version)
+
+ def get_py_string_const(self, encoding, identifier=None,
+ is_str=False, py3str_cstring=None):
+ py_strings = self.py_strings
+ text = self.text
+
+ is_str = bool(identifier or is_str)
+ is_unicode = encoding is None and not is_str
+
+ if encoding is None:
+ # unicode string
+ encoding_key = None
+ else:
+ # bytes or str
+ encoding = encoding.lower()
+ if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
+ encoding = None
+ encoding_key = None
+ else:
+ encoding_key = ''.join(find_alphanums(encoding))
+
+ key = (is_str, is_unicode, encoding_key, py3str_cstring)
+ if py_strings is not None:
+ try:
+ return py_strings[key]
+ except KeyError:
+ pass
+ else:
+ self.py_strings = {}
+
+ if identifier:
+ intern = True
+ elif identifier is None:
+ if isinstance(text, bytes):
+ intern = bool(possible_bytes_identifier(text))
+ else:
+ intern = bool(possible_unicode_identifier(text))
+ else:
+ intern = False
+ if intern:
+ prefix = Naming.interned_prefixes['str']
+ else:
+ prefix = Naming.py_const_prefix
+
+ if encoding_key:
+ encoding_prefix = '_%s' % encoding_key
+ else:
+ encoding_prefix = ''
+
+ pystring_cname = "%s%s%s_%s" % (
+ prefix,
+ (is_str and 's') or (is_unicode and 'u') or 'b',
+ encoding_prefix,
+ self.cname[len(Naming.const_prefix):])
+
+ py_string = PyStringConst(
+ pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
+ self.py_strings[key] = py_string
+ return py_string
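+ # The resulting cnames follow the usual generated-code naming (a sketch;
+ # prefixes come from Naming.py): interned identifiers typically become
+ # "__pyx_n_s_<name>" / "__pyx_n_u_<name>", other text "__pyx_kp_s_...".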
+
+class PyStringConst(object):
+ """Global info about a Python string constant held by GlobalState.
+ """
+ # cname string
+ # py3str_cstring string
+ # encoding string
+ # intern boolean
+ # is_unicode boolean
+ # is_str boolean
+
+ def __init__(self, cname, encoding, is_unicode, is_str=False,
+ py3str_cstring=None, intern=False):
+ self.cname = cname
+ self.py3str_cstring = py3str_cstring
+ self.encoding = encoding
+ self.is_str = is_str
+ self.is_unicode = is_unicode
+ self.intern = intern
+
+ def __lt__(self, other):
+ return self.cname < other.cname
+
+
+class GlobalState(object):
+ # filename_table {string : int} for finding filename table indexes
+ # filename_list [string] filenames in filename table order
+ # input_file_contents dict contents (=list of lines) of any file that was used as input
+ # to create this output C code. This is
+ # used to annotate the comments.
+ #
+ # utility_codes set IDs of used utility code (to avoid reinsertion)
+ #
+ # declared_cnames {string:Entry} used in a transition phase to merge pxd-declared
+ # constants etc. into the pyx-declared ones (i.e.,
+ # check if constants are already added).
+ # In time, hopefully the literals etc. will be
+ # supplied directly instead.
+ #
+ # const_cnames_used dict global counter for unique constant identifiers
+ #
+
+ # parts {string:CCodeWriter}
+
+
+ # interned_strings
+ # consts
+ # interned_nums
+
+ # directives set Temporary variable used to track
+ # the current set of directives in the code generation
+ # process.
+
+ directives = {}
+
+ code_layout = [
+ 'h_code',
+ 'filename_table',
+ 'utility_code_proto_before_types',
+ 'numeric_typedefs', # Let these detailed individual parts stay,
+ 'complex_type_declarations', # as the proper solution is to make a full DAG...
+ 'type_declarations', # More coarse-grained blocks would simply hide
+ 'utility_code_proto', # the ugliness, not fix it.
+ 'module_declarations',
+ 'typeinfo',
+ 'before_global_var',
+ 'global_var',
+ 'string_decls',
+ 'decls',
+ 'late_includes',
+ 'all_the_rest',
+ 'pystring_table',
+ 'cached_builtins',
+ 'cached_constants',
+ 'init_globals',
+ 'init_module',
+ 'cleanup_globals',
+ 'cleanup_module',
+ 'main_method',
+ 'utility_code_def',
+ 'end'
+ ]
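+
+ # How this layout is used (editor's sketch): initialize_main_c_code()
+ # below turns every name in the list into an insertion point on the root
+ # writer, so later passes can append to any section out of order, e.g.:
+ #
+ #   global_state.parts['utility_code_proto'].putln("/* a prototype */")
+ #   global_state['decls'].putln("static PyObject *x;")  # __getitem__ alias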
+
+
+ def __init__(self, writer, module_node, code_config, common_utility_include_dir=None):
+ self.filename_table = {}
+ self.filename_list = []
+ self.input_file_contents = {}
+ self.utility_codes = set()
+ self.declared_cnames = {}
+ self.in_utility_code_generation = False
+ self.code_config = code_config
+ self.common_utility_include_dir = common_utility_include_dir
+ self.parts = {}
+ self.module_node = module_node # because some utility code generation needs it
+ # (e.g. generating backwards-compatible Get/ReleaseBuffer code)
+
+ self.const_cnames_used = {}
+ self.string_const_index = {}
+ self.dedup_const_index = {}
+ self.pyunicode_ptr_const_index = {}
+ self.num_const_index = {}
+ self.py_constants = []
+ self.cached_cmethods = {}
+ self.initialised_constants = set()
+
+ writer.set_global_state(self)
+ self.rootwriter = writer
+
+ def initialize_main_c_code(self):
+ rootwriter = self.rootwriter
+ for part in self.code_layout:
+ self.parts[part] = rootwriter.insertion_point()
+
+ if not Options.cache_builtins:
+ del self.parts['cached_builtins']
+ else:
+ w = self.parts['cached_builtins']
+ w.enter_cfunc_scope()
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {")
+
+ w = self.parts['cached_constants']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {")
+ w.put_declare_refcount_context()
+ w.put_setup_refcount_context("__Pyx_InitCachedConstants")
+
+ w = self.parts['init_globals']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {")
+
+ if not Options.generate_cleanup_code:
+ del self.parts['cleanup_globals']
+ else:
+ w = self.parts['cleanup_globals']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {")
+
+ code = self.parts['utility_code_proto']
+ code.putln("")
+ code.putln("/* --- Runtime support code (head) --- */")
+
+ code = self.parts['utility_code_def']
+ if self.code_config.emit_linenums:
+ code.write('\n#line 1 "cython_utility"\n')
+ code.putln("")
+ code.putln("/* --- Runtime support code --- */")
+
+ def finalize_main_c_code(self):
+ self.close_global_decls()
+
+ #
+ # utility_code_def
+ #
+ code = self.parts['utility_code_def']
+ util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c")
+ code.put(util.format_code(util.impl))
+ code.putln("")
+
+ def __getitem__(self, key):
+ return self.parts[key]
+
+ #
+ # Global constants, interned objects, etc.
+ #
+ def close_global_decls(self):
+ # This is called when it is known that no more global declarations
+ # will be declared.
+ self.generate_const_declarations()
+ if Options.cache_builtins:
+ w = self.parts['cached_builtins']
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ w = self.parts['cached_constants']
+ w.put_finish_refcount_context()
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.put_finish_refcount_context()
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ w = self.parts['init_globals']
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ if Options.generate_cleanup_code:
+ w = self.parts['cleanup_globals']
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ if Options.generate_cleanup_code:
+ w = self.parts['cleanup_module']
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ def put_pyobject_decl(self, entry):
+ self['global_var'].putln("static PyObject *%s;" % entry.cname)
+
+ # constant handling at code generation time
+
+ def get_cached_constants_writer(self, target=None):
+ if target is not None:
+ if target in self.initialised_constants:
+ # Return None on second/later calls to prevent duplicate creation code.
+ return None
+ self.initialised_constants.add(target)
+ return self.parts['cached_constants']
+
+ def get_int_const(self, str_value, longness=False):
+ py_type = longness and 'long' or 'int'
+ try:
+ c = self.num_const_index[(str_value, py_type)]
+ except KeyError:
+ c = self.new_num_const(str_value, py_type)
+ return c
+
+ def get_float_const(self, str_value, value_code):
+ try:
+ c = self.num_const_index[(str_value, 'float')]
+ except KeyError:
+ c = self.new_num_const(str_value, 'float', value_code)
+ return c
+
+ def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
+ if dedup_key is not None:
+ const = self.dedup_const_index.get(dedup_key)
+ if const is not None:
+ return const
+ # create a new Python object constant
+ const = self.new_py_const(type, prefix)
+ if cleanup_level is not None \
+ and cleanup_level <= Options.generate_cleanup_code:
+ cleanup_writer = self.parts['cleanup_globals']
+ cleanup_writer.putln('Py_CLEAR(%s);' % const.cname)
+ if dedup_key is not None:
+ self.dedup_const_index[dedup_key] = const
+ return const
+
+ def get_string_const(self, text, py_version=None):
+ # return a C string constant, creating a new one if necessary
+ if text.is_unicode:
+ byte_string = text.utf8encode()
+ else:
+ byte_string = text.byteencode()
+ try:
+ c = self.string_const_index[byte_string]
+ except KeyError:
+ c = self.new_string_const(text, byte_string)
+ c.add_py_version(py_version)
+ return c
+
+ def get_pyunicode_ptr_const(self, text):
+ # return a Py_UNICODE[] constant, creating a new one if necessary
+ assert text.is_unicode
+ try:
+ c = self.pyunicode_ptr_const_index[text]
+ except KeyError:
+ c = self.pyunicode_ptr_const_index[text] = self.new_const_cname()
+ return c
+
+ def get_py_string_const(self, text, identifier=None,
+ is_str=False, unicode_value=None):
+ # return a Python string constant, creating a new one if necessary
+ py3str_cstring = None
+ if is_str and unicode_value is not None \
+ and unicode_value.utf8encode() != text.byteencode():
+ py3str_cstring = self.get_string_const(unicode_value, py_version=3)
+ c_string = self.get_string_const(text, py_version=2)
+ else:
+ c_string = self.get_string_const(text)
+ py_string = c_string.get_py_string_const(
+ text.encoding, identifier, is_str, py3str_cstring)
+ return py_string
+
+ def get_interned_identifier(self, text):
+ return self.get_py_string_const(text, identifier=True)
+
+ def new_string_const(self, text, byte_string):
+ cname = self.new_string_const_cname(byte_string)
+ c = StringConst(cname, text, byte_string)
+ self.string_const_index[byte_string] = c
+ return c
+
+ def new_num_const(self, value, py_type, value_code=None):
+ cname = self.new_num_const_cname(value, py_type)
+ c = NumConst(cname, value, py_type, value_code)
+ self.num_const_index[(value, py_type)] = c
+ return c
+
+ def new_py_const(self, type, prefix=''):
+ cname = self.new_const_cname(prefix)
+ c = PyObjectConst(cname, type)
+ self.py_constants.append(c)
+ return c
+
+ def new_string_const_cname(self, bytes_value):
+ # Create a new globally-unique nice name for a C string constant.
+ value = bytes_value.decode('ASCII', 'ignore')
+ return self.new_const_cname(value=value)
+
+ def new_num_const_cname(self, value, py_type):
+ if py_type == 'long':
+ value += 'L'
+ py_type = 'int'
+ prefix = Naming.interned_prefixes[py_type]
+ cname = "%s%s" % (prefix, value)
+ cname = cname.replace('+', '_').replace('-', 'neg_').replace('.', '_')
+ return cname
+
+ def new_const_cname(self, prefix='', value=''):
+ value = replace_identifier('_', value)[:32].strip('_')
+ used = self.const_cnames_used
+ name_suffix = value
+ while name_suffix in used:
+ counter = used[value] = used[value] + 1
+ name_suffix = '%s_%d' % (value, counter)
+ used[name_suffix] = 1
+ if prefix:
+ prefix = Naming.interned_prefixes[prefix]
+ else:
+ prefix = Naming.const_prefix
+ return "%s%s" % (prefix, name_suffix)
+
+ def get_cached_unbound_method(self, type_cname, method_name):
+ key = (type_cname, method_name)
+ try:
+ cname = self.cached_cmethods[key]
+ except KeyError:
+ cname = self.cached_cmethods[key] = self.new_const_cname(
+ 'umethod', '%s_%s' % (type_cname, method_name))
+ return cname
+
+ def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
+ # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
+ self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
+ cache_cname = self.get_cached_unbound_method(type_cname, method_name)
+ args = [obj_cname] + arg_cnames
+ return "__Pyx_%s(&%s, %s)" % (
+ utility_code_name,
+ cache_cname,
+ ', '.join(args),
+ )
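+
+ # Emitted expression sketch (editor's illustration with hypothetical
+ # argument cnames; the cache variable name assumes the 'umethod'
+ # interned prefix):
+ #
+ #   cached_unbound_method_call_code('__pyx_v_l', 'PyList_Type',
+ #                                   'append', ['__pyx_v_x'])
+ #   # -> "__Pyx_CallUnboundCMethod1(&__pyx_umethod_PyList_Type_append,
+ #   #                               __pyx_v_l, __pyx_v_x)"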
+
+ def add_cached_builtin_decl(self, entry):
+ if entry.is_builtin and entry.is_const:
+ if self.should_declare(entry.cname, entry):
+ self.put_pyobject_decl(entry)
+ w = self.parts['cached_builtins']
+ condition = None
+ if entry.name in non_portable_builtins_map:
+ condition, replacement = non_portable_builtins_map[entry.name]
+ w.putln('#if %s' % condition)
+ self.put_cached_builtin_init(
+ entry.pos, StringEncoding.EncodedString(replacement),
+ entry.cname)
+ w.putln('#else')
+ self.put_cached_builtin_init(
+ entry.pos, StringEncoding.EncodedString(entry.name),
+ entry.cname)
+ if condition:
+ w.putln('#endif')
+
+ def put_cached_builtin_init(self, pos, name, cname):
+ w = self.parts['cached_builtins']
+ interned_cname = self.get_interned_identifier(name).cname
+ self.use_utility_code(
+ UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
+ w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % (
+ cname,
+ interned_cname,
+ cname,
+ w.error_goto(pos)))
+
+ def generate_const_declarations(self):
+ self.generate_cached_methods_decls()
+ self.generate_string_constants()
+ self.generate_num_constants()
+ self.generate_object_constant_decls()
+
+ def generate_object_constant_decls(self):
+ consts = [(len(c.cname), c.cname, c)
+ for c in self.py_constants]
+ consts.sort()
+ decls_writer = self.parts['decls']
+ for _, cname, c in consts:
+ decls_writer.putln(
+ "static %s;" % c.type.declaration_code(cname))
+
+ def generate_cached_methods_decls(self):
+ if not self.cached_cmethods:
+ return
+
+ decl = self.parts['decls']
+ init = self.parts['init_globals']
+ cnames = []
+ for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
+ cnames.append(cname)
+ method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
+ decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % (
+ cname, method_name_cname))
+ # split type reference storage as it might not be static
+ init.putln('%s.type = (PyObject*)&%s;' % (
+ cname, type_cname))
+
+ if Options.generate_cleanup_code:
+ cleanup = self.parts['cleanup_globals']
+ for cname in cnames:
+ cleanup.putln("Py_CLEAR(%s.method);" % cname)
+
+ def generate_string_constants(self):
+ c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()]
+ c_consts.sort()
+ py_strings = []
+
+ decls_writer = self.parts['string_decls']
+ for _, cname, c in c_consts:
+ conditional = False
+ if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions):
+ conditional = True
+ decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % (
+ (2 in c.py_versions) and '<' or '>='))
+ decls_writer.putln('static const char %s[] = "%s";' % (
+ cname, StringEncoding.split_string_literal(c.escaped_value)))
+ if conditional:
+ decls_writer.putln("#endif")
+ if c.py_strings is not None:
+ for py_string in c.py_strings.values():
+ py_strings.append((c.cname, len(py_string.cname), py_string))
+
+ for c, cname in sorted(self.pyunicode_ptr_const_index.items()):
+ utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c)
+ if utf16_array:
+ # Narrow and wide representations differ
+ decls_writer.putln("#ifdef Py_UNICODE_WIDE")
+ decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array))
+ if utf16_array:
+ decls_writer.putln("#else")
+ decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array))
+ decls_writer.putln("#endif")
+
+ if py_strings:
+ self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c"))
+ py_strings.sort()
+ w = self.parts['pystring_table']
+ w.putln("")
+ w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname)
+ for c_cname, _, py_string in py_strings:
+ if not py_string.is_str or not py_string.encoding or \
+ py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII',
+ 'UTF8', 'UTF-8'):
+ encoding = '0'
+ else:
+ encoding = '"%s"' % py_string.encoding.lower()
+
+ decls_writer.putln(
+ "static PyObject *%s;" % py_string.cname)
+ if py_string.py3str_cstring:
+ w.putln("#if PY_MAJOR_VERSION >= 3")
+ w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
+ py_string.cname,
+ py_string.py3str_cstring.cname,
+ py_string.py3str_cstring.cname,
+ '0', 1, 0,
+ py_string.intern
+ ))
+ w.putln("#else")
+ w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
+ py_string.cname,
+ c_cname,
+ c_cname,
+ encoding,
+ py_string.is_unicode,
+ py_string.is_str,
+ py_string.intern
+ ))
+ if py_string.py3str_cstring:
+ w.putln("#endif")
+ w.putln("{0, 0, 0, 0, 0, 0, 0}")
+ w.putln("};")
+
+ init_globals = self.parts['init_globals']
+ init_globals.putln(
+ "if (__Pyx_InitStrings(%s) < 0) %s" % (
+ Naming.stringtab_cname,
+ init_globals.error_goto(self.module_pos)))
+
+ def generate_num_constants(self):
+ consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c)
+ for c in self.num_const_index.values()]
+ consts.sort()
+ decls_writer = self.parts['decls']
+ init_globals = self.parts['init_globals']
+ for py_type, _, _, value, value_code, c in consts:
+ cname = c.cname
+ decls_writer.putln("static PyObject *%s;" % cname)
+ if py_type == 'float':
+ function = 'PyFloat_FromDouble(%s)'
+ elif py_type == 'long':
+ function = 'PyLong_FromString((char *)"%s", 0, 0)'
+ elif Utils.long_literal(value):
+ function = 'PyInt_FromString((char *)"%s", 0, 0)'
+ elif len(value.lstrip('-')) > 4:
+ function = "PyInt_FromLong(%sL)"
+ else:
+ function = "PyInt_FromLong(%s)"
+ init_globals.putln('%s = %s; %s' % (
+ cname, function % value_code,
+ init_globals.error_goto_if_null(cname, self.module_pos)))
+
+ # The functions below are there in a transition phase only
+ # and will be deprecated. They are called from Nodes.BlockNode.
+ # The copy&paste duplication is intentional in order to be able
+ # to see quickly how BlockNode worked, until this is replaced.
+
+ def should_declare(self, cname, entry):
+ if cname in self.declared_cnames:
+ other = self.declared_cnames[cname]
+ assert str(entry.type) == str(other.type)
+ assert entry.init == other.init
+ return False
+ else:
+ self.declared_cnames[cname] = entry
+ return True
+
+ #
+ # File name state
+ #
+
+ def lookup_filename(self, source_desc):
+ entry = source_desc.get_filenametable_entry()
+ try:
+ index = self.filename_table[entry]
+ except KeyError:
+ index = len(self.filename_list)
+ self.filename_list.append(source_desc)
+ self.filename_table[entry] = index
+ return index
+
+ def commented_file_contents(self, source_desc):
+ try:
+ return self.input_file_contents[source_desc]
+ except KeyError:
+ pass
+ source_file = source_desc.get_lines(encoding='ASCII',
+ error_handling='ignore')
+ try:
+ F = [u' * ' + line.rstrip().replace(
+ u'*/', u'*[inserted by cython to avoid comment closer]/'
+ ).replace(
+ u'/*', u'/[inserted by cython to avoid comment start]*'
+ )
+ for line in source_file]
+ finally:
+ if hasattr(source_file, 'close'):
+ source_file.close()
+ if not F: F.append(u'')
+ self.input_file_contents[source_desc] = F
+ return F
+
+ #
+ # Utility code state
+ #
+
+ def use_utility_code(self, utility_code):
+ """
+ Adds code to the C file. utility_code should
+ a) implement __eq__/__hash__ for the purpose of knowing whether the same
+ code has already been included, and
+ b) implement put_code, which takes a globalstate instance
+
+ See UtilityCode.
+ """
+ if utility_code and utility_code not in self.utility_codes:
+ self.utility_codes.add(utility_code)
+ utility_code.put_code(self)
+
+ def use_entry_utility_code(self, entry):
+ if entry is None:
+ return
+ if entry.utility_code:
+ self.use_utility_code(entry.utility_code)
+ if entry.utility_code_definition:
+ self.use_utility_code(entry.utility_code_definition)
+
+
+def funccontext_property(func):
+ name = func.__name__
+ attribute_of = operator.attrgetter(name)
+ def get(self):
+ return attribute_of(self.funcstate)
+ def set(self, value):
+ setattr(self.funcstate, name, value)
+ return property(get, set)
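+
+ # A minimal sketch of the delegation this sets up (editor's note): for a
+ # property built from 'def error_label(self): pass', reading
+ # 'writer.error_label' returns 'writer.funcstate.error_label', and
+ # assigning it writes through to the funcstate:
+ #
+ #   writer.error_label = writer.new_label()
+ #   assert writer.error_label == writer.funcstate.error_label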
+
+
+class CCodeConfig(object):
+ # emit_linenums boolean write #line pragmas?
+ # emit_code_comments boolean copy the original code into C comments?
+ # c_line_in_traceback boolean append the c file and line number to the traceback for exceptions?
+
+ def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
+ self.emit_code_comments = emit_code_comments
+ self.emit_linenums = emit_linenums
+ self.c_line_in_traceback = c_line_in_traceback
+
+
+class CCodeWriter(object):
+ """
+ Utility class to output C code.
+
+ When creating an insertion point one must care about the state that is
+ kept:
+ - formatting state (level, bol) is cloned and used in insertion points
+ as well
+ - labels, temps, exc_vars: One must construct a scope in which these can
+ exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
+ sanity checking and forward compatibility). Created insertion points
+ lose this scope and cannot access it.
+ - marker: Not copied to insertion point
+ - filename_table, filename_list, input_file_contents: All codewriters
+ coming from the same root share the same instances simultaneously.
+ """
+
+ # f file output file
+ # buffer StringIOTree
+
+ # level int indentation level
+ # bol bool beginning of line?
+ # marker string comment to emit before next line
+ # funcstate FunctionState contains state local to a C function used for code
+ # generation (labels and temps state etc.)
+ # globalstate GlobalState contains state global for a C file (input file info,
+ # utility code, declared constants etc.)
+ # pyclass_stack list used during recursive code generation to pass information
+ # about the current class one is in
+ # code_config CCodeConfig configuration options for the C code writer
+
+ @cython.locals(create_from='CCodeWriter')
+ def __init__(self, create_from=None, buffer=None, copy_formatting=False):
+ if buffer is None: buffer = StringIOTree()
+ self.buffer = buffer
+ self.last_pos = None
+ self.last_marked_pos = None
+ self.pyclass_stack = []
+
+ self.funcstate = None
+ self.globalstate = None
+ self.code_config = None
+ self.level = 0
+ self.call_level = 0
+ self.bol = 1
+
+ if create_from is not None:
+ # Use same global state
+ self.set_global_state(create_from.globalstate)
+ self.funcstate = create_from.funcstate
+ # Clone formatting state
+ if copy_formatting:
+ self.level = create_from.level
+ self.bol = create_from.bol
+ self.call_level = create_from.call_level
+ self.last_pos = create_from.last_pos
+ self.last_marked_pos = create_from.last_marked_pos
+
+ def create_new(self, create_from, buffer, copy_formatting):
+ # polymorphic constructor -- very slightly more versatile
+ # than using __class__
+ result = CCodeWriter(create_from, buffer, copy_formatting)
+ return result
+
+ def set_global_state(self, global_state):
+ assert self.globalstate is None # prevent overwriting once it's set
+ self.globalstate = global_state
+ self.code_config = global_state.code_config
+
+ def copyto(self, f):
+ self.buffer.copyto(f)
+
+ def getvalue(self):
+ return self.buffer.getvalue()
+
+ def write(self, s):
+ # also put invalid markers (lineno 0), to indicate that those lines
+ # have no Cython source code correspondence
+ cython_lineno = self.last_marked_pos[1] if self.last_marked_pos else 0
+ self.buffer.markers.extend([cython_lineno] * s.count('\n'))
+ self.buffer.write(s)
+
+ def insertion_point(self):
+ other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True)
+ return other
+
+ def new_writer(self):
+ """
+ Creates a new CCodeWriter connected to the same global state, which
+ can later be inserted using insert.
+ """
+ return CCodeWriter(create_from=self)
+
+ def insert(self, writer):
+ """
+ Inserts the contents of another code writer (created with
+ the same global state) in the current location.
+
+ It is ok to write to the inserted writer even after insertion.
+ """
+ assert writer.globalstate is self.globalstate
+ self.buffer.insert(writer.buffer)
+
+ # Properties delegated to function scope
+ @funccontext_property
+ def label_counter(self): pass
+ @funccontext_property
+ def return_label(self): pass
+ @funccontext_property
+ def error_label(self): pass
+ @funccontext_property
+ def labels_used(self): pass
+ @funccontext_property
+ def continue_label(self): pass
+ @funccontext_property
+ def break_label(self): pass
+ @funccontext_property
+ def return_from_error_cleanup_label(self): pass
+ @funccontext_property
+ def yield_labels(self): pass
+
+ # Functions delegated to function scope
+ def new_label(self, name=None): return self.funcstate.new_label(name)
+ def new_error_label(self): return self.funcstate.new_error_label()
+ def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
+ def get_loop_labels(self): return self.funcstate.get_loop_labels()
+ def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
+ def new_loop_labels(self): return self.funcstate.new_loop_labels()
+ def get_all_labels(self): return self.funcstate.get_all_labels()
+ def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels)
+ def all_new_labels(self): return self.funcstate.all_new_labels()
+ def use_label(self, lbl): return self.funcstate.use_label(lbl)
+ def label_used(self, lbl): return self.funcstate.label_used(lbl)
+
+
+ def enter_cfunc_scope(self, scope=None):
+ self.funcstate = FunctionState(self, scope=scope)
+
+ def exit_cfunc_scope(self):
+ self.funcstate = None
+
+ # constant handling
+
+ def get_py_int(self, str_value, longness):
+ return self.globalstate.get_int_const(str_value, longness).cname
+
+ def get_py_float(self, str_value, value_code):
+ return self.globalstate.get_float_const(str_value, value_code).cname
+
+ def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
+ return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname
+
+ def get_string_const(self, text):
+ return self.globalstate.get_string_const(text).cname
+
+ def get_pyunicode_ptr_const(self, text):
+ return self.globalstate.get_pyunicode_ptr_const(text)
+
+ def get_py_string_const(self, text, identifier=None,
+ is_str=False, unicode_value=None):
+ return self.globalstate.get_py_string_const(
+ text, identifier, is_str, unicode_value).cname
+
+ def get_argument_default_const(self, type):
+ return self.globalstate.get_py_const(type).cname
+
+ def intern(self, text):
+ return self.get_py_string_const(text)
+
+ def intern_identifier(self, text):
+ return self.get_py_string_const(text, identifier=True)
+
+ def get_cached_constants_writer(self, target=None):
+ return self.globalstate.get_cached_constants_writer(target)
+
+ # code generation
+
+ def putln(self, code="", safe=False):
+ if self.last_pos and self.bol:
+ self.emit_marker()
+ if self.code_config.emit_linenums and self.last_marked_pos:
+ source_desc, line, _ = self.last_marked_pos
+ self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description()))
+ if code:
+ if safe:
+ self.put_safe(code)
+ else:
+ self.put(code)
+ self.write("\n")
+ self.bol = 1
+
+ def mark_pos(self, pos, trace=True):
+ if pos is None:
+ return
+ if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]:
+ return
+ self.last_pos = (pos, trace)
+
+ def emit_marker(self):
+ pos, trace = self.last_pos
+ self.last_marked_pos = pos
+ self.last_pos = None
+ self.write("\n")
+ if self.code_config.emit_code_comments:
+ self.indent()
+ self.write("/* %s */\n" % self._build_marker(pos))
+ if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']:
+ self.indent()
+ self.write('__Pyx_TraceLine(%d,%d,%s)\n' % (
+ pos[1], not self.funcstate.gil_owned, self.error_goto(pos)))
+
+ def _build_marker(self, pos):
+ source_desc, line, col = pos
+ assert isinstance(source_desc, SourceDescriptor)
+ contents = self.globalstate.commented_file_contents(source_desc)
+ lines = contents[max(0, line-3):line] # line numbers start at 1
+ lines[-1] += u' # <<<<<<<<<<<<<<'
+ lines += contents[line:line+2]
+ return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines))
+
+ def put_safe(self, code):
+ # put code without tracking '{'/'}' for indentation (unlike put())
+ self.write(code)
+ self.bol = 0
+
+ def put_or_include(self, code, name):
+ include_dir = self.globalstate.common_utility_include_dir
+ if include_dir and len(code) > 1024:
+ include_file = "%s_%s.h" % (
+ name, hashlib.md5(code.encode('utf8')).hexdigest())
+ path = os.path.join(include_dir, include_file)
+ if not os.path.exists(path):
+ tmp_path = '%s.tmp%s' % (path, os.getpid())
+ with closing(Utils.open_new_file(tmp_path)) as f:
+ f.write(code)
+ shutil.move(tmp_path, path)
+ code = '#include "%s"\n' % path
+ self.put(code)
+
+ def put(self, code):
+ fix_indent = False
+ if "{" in code:
+ dl = code.count("{")
+ else:
+ dl = 0
+ if "}" in code:
+ dl -= code.count("}")
+ if dl < 0:
+ self.level += dl
+ elif dl == 0 and code[0] == "}":
+ # special cases like "} else {" need a temporary dedent
+ fix_indent = True
+ self.level -= 1
+ if self.bol:
+ self.indent()
+ self.write(code)
+ self.bol = 0
+ if dl > 0:
+ self.level += dl
+ elif fix_indent:
+ self.level += 1
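+
+ # Indentation tracking sketch (editor's illustration):
+ #
+ #   w.putln("if (x) {")   # net '{' -> level increases after writing
+ #   w.putln("y = 1;")     # written one level deeper
+ #   w.putln("} else {")   # balanced but starts with '}' -> temporary dedent
+ #   w.putln("}")          # net '}' -> level decreases before writing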
+
+ def putln_tempita(self, code, **context):
+ from ..Tempita import sub
+ self.putln(sub(code, **context))
+
+ def put_tempita(self, code, **context):
+ from ..Tempita import sub
+ self.put(sub(code, **context))
+
+ def increase_indent(self):
+ self.level += 1
+
+ def decrease_indent(self):
+ self.level -= 1
+
+ def begin_block(self):
+ self.putln("{")
+ self.increase_indent()
+
+ def end_block(self):
+ self.decrease_indent()
+ self.putln("}")
+
+ def indent(self):
+ self.write(" " * self.level)
+
+ def get_py_version_hex(self, pyversion):
+ return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4]
+
+ def put_label(self, lbl):
+ if lbl in self.funcstate.labels_used:
+ self.putln("%s:;" % lbl)
+
+ def put_goto(self, lbl):
+ self.funcstate.use_label(lbl)
+ self.putln("goto %s;" % lbl)
+
+ def put_var_declaration(self, entry, storage_class="",
+ dll_linkage=None, definition=True):
+ #print "Code.put_var_declaration:", entry.name, "definition =", definition ###
+ if entry.visibility == 'private' and not (definition or entry.defined_in_pxd):
+ #print "...private and not definition, skipping", entry.cname ###
+ return
+ if entry.visibility == "private" and not entry.used:
+ #print "...private and not used, skipping", entry.cname ###
+ return
+ if storage_class:
+ self.put("%s " % storage_class)
+ if not entry.cf_used:
+ self.put('CYTHON_UNUSED ')
+ self.put(entry.type.declaration_code(
+ entry.cname, dll_linkage=dll_linkage))
+ if entry.init is not None:
+ self.put_safe(" = %s" % entry.type.literal_code(entry.init))
+ elif entry.type.is_pyobject:
+ self.put(" = NULL")
+ self.putln(";")
+
+ def put_temp_declarations(self, func_context):
+ for name, type, manage_ref, static in func_context.temps_allocated:
+ decl = type.declaration_code(name)
+ if type.is_pyobject:
+ self.putln("%s = NULL;" % decl)
+ elif type.is_memoryviewslice:
+ from . import MemoryView
+ self.putln("%s = %s;" % (decl, MemoryView.memslice_entry_init))
+ else:
+ self.putln("%s%s;" % (static and "static " or "", decl))
+
+ if func_context.should_declare_error_indicator:
+ if self.funcstate.uses_error_indicator:
+ unused = ''
+ else:
+ unused = 'CYTHON_UNUSED '
+ # Initialize these variables to silence compiler warnings
+ self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname))
+ self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname))
+ self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname))
+
+ def put_generated_by(self):
+ self.putln("/* Generated by Cython %s */" % Version.watermark)
+ self.putln("")
+
+ def put_h_guard(self, guard):
+ self.putln("#ifndef %s" % guard)
+ self.putln("#define %s" % guard)
+
+ def unlikely(self, cond):
+ if Options.gcc_branch_hints:
+ return 'unlikely(%s)' % cond
+ else:
+ return cond
+
+ def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper):
+ if not modifiers:
+ return ''
+ return '%s ' % ' '.join([mapper(m,m) for m in modifiers])
+
+ # Python objects and reference counting
+
+ def entry_as_pyobject(self, entry):
+ type = entry.type
+ if (not entry.is_self_arg and not entry.type.is_complete()
+ or entry.type.is_extension_type):
+ return "(PyObject *)" + entry.cname
+ else:
+ return entry.cname
+
+ def as_pyobject(self, cname, type):
+ from .PyrexTypes import py_object_type, typecast
+ return typecast(py_object_type, type, cname)
+
+ def put_gotref(self, cname):
+ self.putln("__Pyx_GOTREF(%s);" % cname)
+
+ def put_giveref(self, cname):
+ self.putln("__Pyx_GIVEREF(%s);" % cname)
+
+ def put_xgiveref(self, cname):
+ self.putln("__Pyx_XGIVEREF(%s);" % cname)
+
+ def put_xgotref(self, cname):
+ self.putln("__Pyx_XGOTREF(%s);" % cname)
+
+ def put_incref(self, cname, type, nanny=True):
+ if nanny:
+ self.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname, type))
+ else:
+ self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type))
+
+ def put_decref(self, cname, type, nanny=True):
+ self._put_decref(cname, type, nanny, null_check=False, clear=False)
+
+ def put_var_gotref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_GOTREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_giveref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_GIVEREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_xgotref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_XGOTREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_xgiveref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_XGIVEREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_incref(self, entry, nanny=True):
+ if entry.type.is_pyobject:
+ if nanny:
+ self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
+ else:
+ self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_xincref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_XINCREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_decref_clear(self, cname, type, nanny=True, clear_before_decref=False):
+ self._put_decref(cname, type, nanny, null_check=False,
+ clear=True, clear_before_decref=clear_before_decref)
+
+ def put_xdecref(self, cname, type, nanny=True, have_gil=True):
+ self._put_decref(cname, type, nanny, null_check=True,
+ have_gil=have_gil, clear=False)
+
+ def put_xdecref_clear(self, cname, type, nanny=True, clear_before_decref=False):
+ self._put_decref(cname, type, nanny, null_check=True,
+ clear=True, clear_before_decref=clear_before_decref)
+
+ def _put_decref(self, cname, type, nanny=True, null_check=False,
+ have_gil=True, clear=False, clear_before_decref=False):
+ if type.is_memoryviewslice:
+ self.put_xdecref_memoryviewslice(cname, have_gil=have_gil)
+ return
+
+ prefix = '__Pyx' if nanny else 'Py'
+ X = 'X' if null_check else ''
+
+ if clear:
+ if clear_before_decref:
+ if not nanny:
+ X = '' # CPython doesn't have a Py_XCLEAR()
+ self.putln("%s_%sCLEAR(%s);" % (prefix, X, cname))
+ else:
+ self.putln("%s_%sDECREF(%s); %s = 0;" % (
+ prefix, X, self.as_pyobject(cname, type), cname))
+ else:
+ self.putln("%s_%sDECREF(%s);" % (
+ prefix, X, self.as_pyobject(cname, type)))
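+
+ # Dispatch sketch for the flags above (editor's illustration, for a
+ # plain PyObject* cname 'o'):
+ #
+ #   put_decref(...)                                   -> __Pyx_DECREF(o);
+ #   put_xdecref(..., nanny=False)                     -> Py_XDECREF(o);
+ #   put_xdecref_clear(...)                            -> __Pyx_XDECREF(o); o = 0;
+ #   put_xdecref_clear(..., clear_before_decref=True)  -> __Pyx_XCLEAR(o);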
+
+ def put_decref_set(self, cname, rhs_cname):
+ self.putln("__Pyx_DECREF_SET(%s, %s);" % (cname, rhs_cname))
+
+ def put_xdecref_set(self, cname, rhs_cname):
+ self.putln("__Pyx_XDECREF_SET(%s, %s);" % (cname, rhs_cname))
+
+ def put_var_decref(self, entry):
+ if entry.type.is_pyobject:
+ self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_xdecref(self, entry, nanny=True):
+ if entry.type.is_pyobject:
+ if nanny:
+ self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
+ else:
+ self.putln("Py_XDECREF(%s);" % self.entry_as_pyobject(entry))
+
+ def put_var_decref_clear(self, entry):
+ self._put_var_decref_clear(entry, null_check=False)
+
+ def put_var_xdecref_clear(self, entry):
+ self._put_var_decref_clear(entry, null_check=True)
+
+ def _put_var_decref_clear(self, entry, null_check):
+ if entry.type.is_pyobject:
+ if entry.in_closure:
+ # reset before DECREF to make sure closure state is
+ # consistent during call to DECREF()
+ self.putln("__Pyx_%sCLEAR(%s);" % (
+ null_check and 'X' or '',
+ entry.cname))
+ else:
+ self.putln("__Pyx_%sDECREF(%s); %s = 0;" % (
+ null_check and 'X' or '',
+ self.entry_as_pyobject(entry),
+ entry.cname))
+
+ def put_var_decrefs(self, entries, used_only = 0):
+ for entry in entries:
+ if not used_only or entry.used:
+ if entry.xdecref_cleanup:
+ self.put_var_xdecref(entry)
+ else:
+ self.put_var_decref(entry)
+
+ def put_var_xdecrefs(self, entries):
+ for entry in entries:
+ self.put_var_xdecref(entry)
+
+ def put_var_xdecrefs_clear(self, entries):
+ for entry in entries:
+ self.put_var_xdecref_clear(entry)
+
+ def put_incref_memoryviewslice(self, slice_cname, have_gil=False):
+ from . import MemoryView
+ self.globalstate.use_utility_code(MemoryView.memviewslice_init_code)
+ self.putln("__PYX_INC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
+
+ def put_xdecref_memoryviewslice(self, slice_cname, have_gil=False):
+ from . import MemoryView
+ self.globalstate.use_utility_code(MemoryView.memviewslice_init_code)
+ self.putln("__PYX_XDEC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
+
+ def put_xgiveref_memoryviewslice(self, slice_cname):
+ self.put_xgiveref("%s.memview" % slice_cname)
+
+ def put_init_to_py_none(self, cname, type, nanny=True):
+ from .PyrexTypes import py_object_type, typecast
+ py_none = typecast(type, py_object_type, "Py_None")
+ if nanny:
+ self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
+ else:
+ self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
+
+ def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
+ code = template % entry.cname
+ #if entry.type.is_extension_type:
+ # code = "((PyObject*)%s)" % code
+ self.put_init_to_py_none(code, entry.type, nanny)
+ if entry.in_closure:
+ self.put_giveref('Py_None')
+
+ def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None):
+ if entry.is_special or entry.name == '__getattribute__':
+ if entry.name not in special_py_methods:
+ if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']:
+ pass
+ # Python's typeobject.c will automatically fill in our slot
+ # in add_operators() (called by PyType_Ready) with a value
+ # that's better than ours.
+ elif allow_skip:
+ return
+
+ method_flags = entry.signature.method_flags()
+ if not method_flags:
+ return
+ from . import TypeSlots
+ if entry.is_special or TypeSlots.is_reverse_number_slot(entry.name):
+ method_flags += [TypeSlots.method_coexist]
+ func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname
+ # Add required casts, but try not to shadow real warnings.
+ cast = '__Pyx_PyCFunctionFast' if 'METH_FASTCALL' in method_flags else 'PyCFunction'
+ if 'METH_KEYWORDS' in method_flags:
+ cast += 'WithKeywords'
+ if cast != 'PyCFunction':
+ func_ptr = '(void*)(%s)%s' % (cast, func_ptr)
+ self.putln(
+ '{"%s", (PyCFunction)%s, %s, %s}%s' % (
+ entry.name,
+ func_ptr,
+ "|".join(method_flags),
+ entry.doc_cname if entry.doc else '0',
+ term))
+
+ def put_pymethoddef_wrapper(self, entry):
+ func_cname = entry.func_cname
+ if entry.is_special:
+ method_flags = entry.signature.method_flags()
+ if method_flags and 'METH_NOARGS' in method_flags:
+ # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one.
+ func_cname = Naming.method_wrapper_prefix + func_cname
+ self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % (
+ func_cname, entry.func_cname))
+ return func_cname
+
+ # GIL methods
+
+ def put_ensure_gil(self, declare_gilstate=True, variable=None):
+ """
+ Acquire the GIL. The generated code is safe even when no PyThreadState
+ has been allocated for this thread (for threads not initialized by
+ using the Python API). Additionally, the code generated by this method
+ may be called recursively.
+ """
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ self.putln("#ifdef WITH_THREAD")
+ if not variable:
+ variable = '__pyx_gilstate_save'
+ if declare_gilstate:
+ self.put("PyGILState_STATE ")
+ self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
+ self.putln("#endif")
+
+ def put_release_ensured_gil(self, variable=None):
+ """
+ Releases the GIL, corresponds to `put_ensure_gil`.
+ """
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ if not variable:
+ variable = '__pyx_gilstate_save'
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("__Pyx_PyGILState_Release(%s);" % variable)
+ self.putln("#endif")
+
+ def put_acquire_gil(self, variable=None):
+ """
+ Acquire the GIL. The thread's thread state must have been initialized
+ by a previous `put_release_gil`.
+ """
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("__Pyx_FastGIL_Forget();")
+ if variable:
+ self.putln('_save = %s;' % variable)
+ self.putln("Py_BLOCK_THREADS")
+ self.putln("#endif")
+
+ def put_release_gil(self, variable=None):
+ "Release the GIL, corresponds to `put_acquire_gil`."
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("PyThreadState *_save;")
+ self.putln("Py_UNBLOCK_THREADS")
+ if variable:
+ self.putln('%s = _save;' % variable)
+ self.putln("__Pyx_FastGIL_Remember();")
+ self.putln("#endif")
+
+ def declare_gilstate(self):
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("PyGILState_STATE __pyx_gilstate_save;")
+ self.putln("#endif")
+
+ # error handling
+
+ def put_error_if_neg(self, pos, value):
+ # TODO this path is almost _never_ taken, yet this macro makes it slower!
+ # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
+ return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
+
+ def put_error_if_unbound(self, pos, entry, in_nogil_context=False):
+ from . import ExprNodes
+ if entry.from_closure:
+ func = '__Pyx_RaiseClosureNameError'
+ self.globalstate.use_utility_code(
+ ExprNodes.raise_closure_name_error_utility_code)
+ elif entry.type.is_memoryviewslice and in_nogil_context:
+ func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
+ self.globalstate.use_utility_code(
+ ExprNodes.raise_unbound_memoryview_utility_code_nogil)
+ else:
+ func = '__Pyx_RaiseUnboundLocalError'
+ self.globalstate.use_utility_code(
+ ExprNodes.raise_unbound_local_error_utility_code)
+
+ self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
+ entry.type.check_for_null_code(entry.cname),
+ func,
+ entry.name,
+ self.error_goto(pos)))
+
+ def set_error_info(self, pos, used=False):
+ self.funcstate.should_declare_error_indicator = True
+ if used:
+ self.funcstate.uses_error_indicator = True
+ return "__PYX_MARK_ERR_POS(%s, %s)" % (
+ self.lookup_filename(pos[0]),
+ pos[1])
+
+ def error_goto(self, pos, used=True):
+ lbl = self.funcstate.error_label
+ self.funcstate.use_label(lbl)
+ if pos is None:
+ return 'goto %s;' % lbl
+ self.funcstate.should_declare_error_indicator = True
+ if used:
+ self.funcstate.uses_error_indicator = True
+ return "__PYX_ERR(%s, %s, %s)" % (
+ self.lookup_filename(pos[0]),
+ pos[1],
+ lbl)
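+
+ # Emitted form (editor's sketch): inside a function whose error label is
+ # '__pyx_L1_error', error_goto(pos) for source line 42 of file index 0
+ # expands to
+ #
+ #   __PYX_ERR(0, 42, __pyx_L1_error)
+ #
+ # while error_goto(None) degrades to a bare 'goto __pyx_L1_error;'.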
+
+ def error_goto_if(self, cond, pos):
+ return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
+
+ def error_goto_if_null(self, cname, pos):
+ return self.error_goto_if("!%s" % cname, pos)
+
+ def error_goto_if_neg(self, cname, pos):
+ return self.error_goto_if("%s < 0" % cname, pos)
+
+ def error_goto_if_PyErr(self, pos):
+ return self.error_goto_if("PyErr_Occurred()", pos)
+
+ def lookup_filename(self, filename):
+ return self.globalstate.lookup_filename(filename)
+
+ def put_declare_refcount_context(self):
+ self.putln('__Pyx_RefNannyDeclarations')
+
+ def put_setup_refcount_context(self, name, acquire_gil=False):
+ if acquire_gil:
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+ self.putln('__Pyx_RefNannySetupContext("%s", %d);' % (name, acquire_gil and 1 or 0))
+
+ def put_finish_refcount_context(self):
+ self.putln("__Pyx_RefNannyFinishContext();")
+
+ def put_add_traceback(self, qualified_name, include_cline=True):
+ """
+ Build a Python traceback for propagating exceptions.
+
+ qualified_name should be the qualified name of the function.
+ """
+ format_tuple = (
+ qualified_name,
+ Naming.clineno_cname if include_cline else 0,
+ Naming.lineno_cname,
+ Naming.filename_cname,
+ )
+ self.funcstate.uses_error_indicator = True
+ self.putln('__Pyx_AddTraceback("%s", %s, %s, %s);' % format_tuple)
+
+ def put_unraisable(self, qualified_name, nogil=False):
+ """
+ Generate code to print a Python warning for an unraisable exception.
+
+ qualified_name should be the qualified name of the function.
+ """
+ format_tuple = (
+ qualified_name,
+ Naming.clineno_cname,
+ Naming.lineno_cname,
+ Naming.filename_cname,
+ self.globalstate.directives['unraisable_tracebacks'],
+ nogil,
+ )
+ self.funcstate.uses_error_indicator = True
+ self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple)
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c"))
+
+ def put_trace_declarations(self):
+ self.putln('__Pyx_TraceDeclarations')
+
+ def put_trace_frame_init(self, codeobj=None):
+ if codeobj:
+ self.putln('__Pyx_TraceFrameInit(%s)' % codeobj)
+
+ def put_trace_call(self, name, pos, nogil=False):
+ self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % (
+ name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos)))
+
+ def put_trace_exception(self):
+ self.putln("__Pyx_TraceException();")
+
+ def put_trace_return(self, retvalue_cname, nogil=False):
+ self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil))
+
+ def putln_openmp(self, string):
+ self.putln("#ifdef _OPENMP")
+ self.putln(string)
+ self.putln("#endif /* _OPENMP */")
+
+ def undef_builtin_expect(self, cond):
+ """
+ Redefine the macros likely() and unlikely() to no-ops, depending on
+ the condition 'cond'.
+ """
+ self.putln("#if %s" % cond)
+ self.putln(" #undef likely")
+ self.putln(" #undef unlikely")
+ self.putln(" #define likely(x) (x)")
+ self.putln(" #define unlikely(x) (x)")
+ self.putln("#endif")
+
+ def redef_builtin_expect(self, cond):
+ self.putln("#if %s" % cond)
+ self.putln(" #undef likely")
+ self.putln(" #undef unlikely")
+ self.putln(" #define likely(x) __builtin_expect(!!(x), 1)")
+ self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
+ self.putln("#endif")
+
+
+class PyrexCodeWriter(object):
+ # f file output file
+ # level int indentation level
+
+ def __init__(self, outfile_name):
+ self.f = Utils.open_new_file(outfile_name)
+ self.level = 0
+
+ def putln(self, code):
+ self.f.write("%s%s\n" % (" " * self.level, code))
+
+ def indent(self):
+ self.level += 1
+
+ def dedent(self):
+ self.level -= 1
+
+class PyxCodeWriter(object):
+ """
+ Can be used for writing out some Cython code. To use the indenter
+ functionality, the Cython.Compiler.Importer module has to be used
+ to load the code, in order to support Python 2.4.
+ """
+
+ def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
+ self.buffer = buffer or StringIOTree()
+ self.level = indent_level
+ self.context = context
+ self.encoding = encoding
+
+ def indent(self, levels=1):
+ self.level += levels
+ return True
+
+ def dedent(self, levels=1):
+ self.level -= levels
+
+ def indenter(self, line):
+ """
+ Instead of
+
+ with pyx_code.indenter("for i in range(10):"):
+ pyx_code.putln("print i")
+
+ write
+
+ if pyx_code.indenter("for i in range(10):"):
+ pyx_code.putln("print i")
+ pyx_code.dedent()
+ """
+ self.putln(line)
+ self.indent()
+ return True
+
+ def getvalue(self):
+ result = self.buffer.getvalue()
+ if isinstance(result, bytes):
+ result = result.decode(self.encoding)
+ return result
+
+ def putln(self, line, context=None):
+ context = context or self.context
+ if context:
+ line = sub_tempita(line, context)
+ self._putln(line)
+
+ def _putln(self, line):
+ self.buffer.write("%s%s\n" % (self.level * " ", line))
+
+ def put_chunk(self, chunk, context=None):
+ context = context or self.context
+ if context:
+ chunk = sub_tempita(chunk, context)
+
+ chunk = textwrap.dedent(chunk)
+ for line in chunk.splitlines():
+ self._putln(line)
+
+ def insertion_point(self):
+ return PyxCodeWriter(self.buffer.insertion_point(), self.level,
+ self.context)
+
+ def named_insertion_point(self, name):
+ setattr(self, name, self.insertion_point())
+
+
+class ClosureTempAllocator(object):
+ def __init__(self, klass):
+ self.klass = klass
+ self.temps_allocated = {}
+ self.temps_free = {}
+ self.temps_count = 0
+
+ def reset(self):
+ for type, cnames in self.temps_allocated.items():
+ self.temps_free[type] = list(cnames)
+
+ def allocate_temp(self, type):
+ if type not in self.temps_allocated:
+ self.temps_allocated[type] = []
+ self.temps_free[type] = []
+ elif self.temps_free[type]:
+ return self.temps_free[type].pop(0)
+ cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
+ self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
+ self.temps_allocated[type].append(cname)
+ self.temps_count += 1
+ return cname
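+
+ # Reuse sketch (editor's illustration; the cname assumes the default
+ # codewriter temp prefix): allocate_temp() declares a fresh closure field
+ # per request until reset() recycles the already declared ones:
+ #
+ #   t1 = alloc.allocate_temp(py_object_type)   # declares e.g. '__pyx_t_0'
+ #   alloc.reset()
+ #   t2 = alloc.allocate_temp(py_object_type)   # t2 == t1, no new field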
diff --git a/contrib/tools/cython/Cython/Compiler/CodeGeneration.py b/contrib/tools/cython/Cython/Compiler/CodeGeneration.py
new file mode 100644
index 0000000000..e64049c7f5
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/CodeGeneration.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+from .Visitor import VisitorTransform
+from .Nodes import StatListNode
+
+
+class ExtractPxdCode(VisitorTransform):
+ """
+ Finds nodes in a pxd file that should generate code, and
+ returns them in a StatListNode.
+
+ The result is a tuple (StatListNode, ModuleScope), i.e.
+ everything that is needed from the pxd after it is processed.
+
+ A purer approach would be to separately compile the pxd code,
+ but the result would have to be slightly more sophisticated
+ than pure strings (functions + wanted interned strings +
+ wanted utility code + wanted cached objects) so for now this
+ approach is taken.
+ """
+
+ def __call__(self, root):
+ self.funcs = []
+ self.visitchildren(root)
+ return (StatListNode(root.pos, stats=self.funcs), root.scope)
+
+ def visit_FuncDefNode(self, node):
+ self.funcs.append(node)
+ # Do not visit children, nested funcdefnodes will
+ # also be moved by this action...
+ return node
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ return node
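+
+ # Usage sketch (editor's note): applied to a parsed pxd module tree,
+ # the transform returns the code-generating statements together with
+ # the module scope:
+ #
+ #   stats, scope = ExtractPxdCode()(pxd_tree)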
diff --git a/contrib/tools/cython/Cython/Compiler/CythonScope.py b/contrib/tools/cython/Cython/Compiler/CythonScope.py
new file mode 100644
index 0000000000..1c25d1a6b4
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/CythonScope.py
@@ -0,0 +1,164 @@
+from __future__ import absolute_import
+
+from .Symtab import ModuleScope
+from .PyrexTypes import *
+from .UtilityCode import CythonUtilityCode
+from .Errors import error
+from .Scanning import StringSourceDescriptor
+from . import MemoryView
+
+
+class CythonScope(ModuleScope):
+ is_cython_builtin = 1
+ _cythonscope_initialized = False
+
+ def __init__(self, context):
+ ModuleScope.__init__(self, u'cython', None, None)
+ self.pxd_file_loaded = True
+ self.populate_cython_scope()
+ # The Main.Context object
+ self.context = context
+
+ for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
+ entry = self.declare_typedef(fused_type.name,
+ fused_type,
+ None,
+ cname='<error>')
+ entry.in_cinclude = True
+
+ def is_cpp(self):
+ # Allow C++ utility code in C++ contexts.
+ return self.context.cpp
+
+ def lookup_type(self, name):
+ # This function should go away when types are all first-level objects.
+ type = parse_basic_type(name)
+ if type:
+ return type
+
+ return super(CythonScope, self).lookup_type(name)
+
+ def lookup(self, name):
+ entry = super(CythonScope, self).lookup(name)
+
+ if entry is None and not self._cythonscope_initialized:
+ self.load_cythonscope()
+ entry = super(CythonScope, self).lookup(name)
+
+ return entry
+
+ def find_module(self, module_name, pos):
+ error("cython.%s is not available" % module_name, pos)
+
+ def find_submodule(self, module_name):
+ entry = self.entries.get(module_name, None)
+ if not entry:
+ self.load_cythonscope()
+ entry = self.entries.get(module_name, None)
+
+ if entry and entry.as_module:
+ return entry.as_module
+ else:
+ # TODO: fix find_submodule control flow so that we're not
+ # expected to create a submodule here (to protect CythonScope's
+ # possible immutability). Hack ourselves out of the situation
+ # for now.
+ raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
+ "cython.%s is not available" % module_name)
+
+ def lookup_qualified_name(self, qname):
+ # ExprNode.as_cython_attribute generates qnames and we untangle it here...
+ name_path = qname.split(u'.')
+ scope = self
+ while len(name_path) > 1:
+ scope = scope.lookup_here(name_path[0])
+ if scope:
+ scope = scope.as_module
+ del name_path[0]
+ if scope is None:
+ return None
+ else:
+ return scope.lookup_here(name_path[0])
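+
+ # Example (editor's sketch): lookup_qualified_name(u'view.array') resolves
+ # 'view' in this scope, steps into that entry's module scope, and finally
+ # looks up 'array' there; a miss anywhere along the path yields None.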
+
+ def populate_cython_scope(self):
+ # These are used to optimize isinstance in FinalOptimizePhase
+ type_object = self.declare_typedef(
+ 'PyTypeObject',
+ base_type = c_void_type,
+ pos = None,
+ cname = 'PyTypeObject')
+ type_object.is_void = True
+ type_object_type = type_object.type
+
+ self.declare_cfunction(
+ 'PyObject_TypeCheck',
+ CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
+ CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
+ pos = None,
+ defining = 1,
+ cname = 'PyObject_TypeCheck')
+
+ def load_cythonscope(self):
+ """
+ Creates some entries for testing purposes and entries for
+ cython.array() and for cython.view.*.
+ """
+ if self._cythonscope_initialized:
+ return
+
+ self._cythonscope_initialized = True
+ cython_testscope_utility_code.declare_in_scope(
+ self, cython_scope=self)
+ cython_test_extclass_utility_code.declare_in_scope(
+ self, cython_scope=self)
+
+ #
+ # The view sub-scope
+ #
+ self.viewscope = viewscope = ModuleScope(u'view', self, None)
+ self.declare_module('view', viewscope, None).as_module = viewscope
+ viewscope.is_cython_builtin = True
+ viewscope.pxd_file_loaded = True
+
+ cythonview_testscope_utility_code.declare_in_scope(
+ viewscope, cython_scope=self)
+
+ view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
+ self.viewscope, cython_scope=self,
+ whitelist=MemoryView.view_utility_whitelist)
+
+ # self.entries["array"] = view_utility_scope.entries.pop("array")
+
+
+def create_cython_scope(context):
+ # One could in fact probably make it a singleton,
+ # but not sure yet whether any code mutates it (which would prevent
+ # reusing it across different contexts)
+ return CythonScope(context)
+
+# Load test utilities for the cython scope
+
+def load_testscope_utility(cy_util_name, **kwargs):
+ return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs)
+
+
+undecorated_methods_protos = UtilityCode(proto=u"""
+ /* These methods are undecorated and therefore have no prototype */
+ static PyObject *__pyx_TestClass_cdef_method(
+ struct __pyx_TestClass_obj *self, int value);
+ static PyObject *__pyx_TestClass_cpdef_method(
+ struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
+ static PyObject *__pyx_TestClass_def_method(
+ PyObject *self, PyObject *value);
+""")
+
+cython_testscope_utility_code = load_testscope_utility("TestScope")
+
+test_cython_utility_dep = load_testscope_utility("TestDep")
+
+cython_test_extclass_utility_code = \
+ load_testscope_utility("TestClass", name="TestClass",
+ requires=[undecorated_methods_protos,
+ test_cython_utility_dep])
+
+cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
diff --git a/contrib/tools/cython/Cython/Compiler/DebugFlags.py b/contrib/tools/cython/Cython/Compiler/DebugFlags.py
new file mode 100644
index 0000000000..e830ab1849
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/DebugFlags.py
@@ -0,0 +1,21 @@
+# Can be enabled at the command line with --debug-xxx.
+
+debug_disposal_code = 0
+debug_temp_alloc = 0
+debug_coercion = 0
+
+# Write comments into the C code that show where temporary variables
+# are allocated and released.
+debug_temp_code_comments = 0
+
+# Write a call trace of the code generation phase into the C code.
+debug_trace_code_generation = 0
+
+# Do not replace exceptions with user-friendly error messages.
+debug_no_exception_intercept = 0
+
+# Print a message each time a new stage in the pipeline is entered.
+debug_verbose_pipeline = 0
+
+# Raise an exception when an error is encountered.
+debug_exception_on_error = 0
diff --git a/contrib/tools/cython/Cython/Compiler/Errors.py b/contrib/tools/cython/Cython/Compiler/Errors.py
new file mode 100644
index 0000000000..9761b52c32
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Errors.py
@@ -0,0 +1,265 @@
+#
+# Errors
+#
+
+from __future__ import absolute_import
+
+try:
+ from __builtin__ import basestring as any_string_type
+except ImportError:
+ any_string_type = (bytes, str)
+
+import sys
+from contextlib import contextmanager
+
+from ..Utils import open_new_file
+from . import DebugFlags
+from . import Options
+
+
+class PyrexError(Exception):
+ pass
+
+
+class PyrexWarning(Exception):
+ pass
+
+
+def context(position):
+ source = position[0]
+ assert not (isinstance(source, any_string_type)), (
+ "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source)
+ try:
+ F = source.get_lines()
+ except UnicodeDecodeError:
+ # file has an encoding problem
+ s = u"[unprintable code]\n"
+ else:
+ s = u''.join(F[max(0, position[1]-6):position[1]])
+ s = u'...\n%s%s^\n' % (s, u' '*(position[2]-1))
+ s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60)
+ return s
+
+def format_position(position):
+ if position:
+ return u"%s:%d:%d: " % (position[0].get_error_description(),
+ position[1], position[2])
+ return u''
+
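+# e.g. format_position((source, 10, 5)) would return u"file.pyx:10:5: ",
+# assuming source.get_error_description() yields "file.pyx" (sketch).
+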
+def format_error(message, position):
+ if position:
+ pos_str = format_position(position)
+ cont = context(position)
+ message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'')
+ return message
+
+class CompileError(PyrexError):
+
+ def __init__(self, position = None, message = u""):
+ self.position = position
+ self.message_only = message
+ self.formatted_message = format_error(message, position)
+ self.reported = False
+ # Deprecated and withdrawn in 2.6:
+ # self.message = message
+ Exception.__init__(self, self.formatted_message)
+ # Python Exception subclass pickling is broken,
+ # see http://bugs.python.org/issue1692335
+ self.args = (position, message)
+
+ def __str__(self):
+ return self.formatted_message
+
+class CompileWarning(PyrexWarning):
+
+ def __init__(self, position = None, message = ""):
+ self.position = position
+ # Deprecated and withdrawn in 2.6:
+ # self.message = message
+ Exception.__init__(self, format_position(position) + message)
+
+class InternalError(Exception):
+ # If this is ever raised, there is a bug in the compiler.
+
+ def __init__(self, message):
+ self.message_only = message
+ Exception.__init__(self, u"Internal compiler error: %s"
+ % message)
+
+class AbortError(Exception):
+ # Throw this to stop the compilation immediately.
+
+ def __init__(self, message):
+ self.message_only = message
+ Exception.__init__(self, u"Abort error: %s" % message)
+
+class CompilerCrash(CompileError):
+ # raised when an unexpected exception occurs in a transform
+ def __init__(self, pos, context, message, cause, stacktrace=None):
+ if message:
+ message = u'\n' + message
+ else:
+ message = u'\n'
+ self.message_only = message
+ if context:
+ message = u"Compiler crash in %s%s" % (context, message)
+ if stacktrace:
+ import traceback
+ message += (
+ u'\n\nCompiler crash traceback from this point on:\n' +
+ u''.join(traceback.format_tb(stacktrace)))
+ if cause:
+ if not stacktrace:
+ message += u'\n'
+ message += u'%s: %s' % (cause.__class__.__name__, cause)
+ CompileError.__init__(self, pos, message)
+ # Python Exception subclass pickling is broken,
+ # see http://bugs.python.org/issue1692335
+ self.args = (pos, context, message, cause, stacktrace)
+
+class NoElementTreeInstalledException(PyrexError):
+ """raised when the user enabled options.gdb_debug but no ElementTree
+ implementation was found
+ """
+
+listing_file = None
+num_errors = 0
+echo_file = None
+
+def open_listing_file(path, echo_to_stderr = 1):
+    # Begin a new error listing. If path is None, no file
+    # is opened; only the error counter is reset.
+ global listing_file, num_errors, echo_file
+ if path is not None:
+ listing_file = open_new_file(path)
+ else:
+ listing_file = None
+ if echo_to_stderr:
+ echo_file = sys.stderr
+ else:
+ echo_file = None
+ num_errors = 0
+
+def close_listing_file():
+ global listing_file
+ if listing_file:
+ listing_file.close()
+ listing_file = None
+
+def report_error(err, use_stack=True):
+ if error_stack and use_stack:
+ error_stack[-1].append(err)
+ else:
+ global num_errors
+ # See Main.py for why dual reporting occurs. Quick fix for now.
+ if err.reported: return
+ err.reported = True
+ try: line = u"%s\n" % err
+ except UnicodeEncodeError:
+ # Python <= 2.5 does this for non-ASCII Unicode exceptions
+ line = format_error(getattr(err, 'message_only', "[unprintable exception message]"),
+ getattr(err, 'position', None)) + u'\n'
+ if listing_file:
+ try: listing_file.write(line)
+ except UnicodeEncodeError:
+ listing_file.write(line.encode('ASCII', 'replace'))
+ if echo_file:
+ try: echo_file.write(line)
+ except UnicodeEncodeError:
+ echo_file.write(line.encode('ASCII', 'replace'))
+ num_errors += 1
+ if Options.fast_fail:
+ raise AbortError("fatal errors")
+
+
+def error(position, message):
+ #print("Errors.error:", repr(position), repr(message)) ###
+ if position is None:
+ raise InternalError(message)
+ err = CompileError(position, message)
+ if DebugFlags.debug_exception_on_error: raise Exception(err) # debug
+ report_error(err)
+ return err
+
+
+LEVEL = 1  # report all warnings and notes of level 1 or higher
+
+
+def message(position, message, level=1):
+ if level < LEVEL:
+ return
+ warn = CompileWarning(position, message)
+ line = "note: %s\n" % warn
+ if listing_file:
+ listing_file.write(line)
+ if echo_file:
+ echo_file.write(line)
+ return warn
+
+
+def warning(position, message, level=0):
+ if level < LEVEL:
+ return
+ if Options.warning_errors and position:
+ return error(position, message)
+ warn = CompileWarning(position, message)
+ line = "warning: %s\n" % warn
+ if listing_file:
+ listing_file.write(line)
+ if echo_file:
+ echo_file.write(line)
+ return warn
+
+
+_warn_once_seen = {}
+def warn_once(position, message, level=0):
+ if level < LEVEL or message in _warn_once_seen:
+ return
+ warn = CompileWarning(position, message)
+ line = "warning: %s\n" % warn
+ if listing_file:
+ listing_file.write(line)
+ if echo_file:
+ echo_file.write(line)
+ _warn_once_seen[message] = True
+ return warn
+
+
+# These functions can be used to temporarily suppress errors.
+
+error_stack = []
+
+
+def hold_errors():
+ error_stack.append([])
+
+
+def release_errors(ignore=False):
+ held_errors = error_stack.pop()
+ if not ignore:
+ for err in held_errors:
+ report_error(err)
+
+
+def held_errors():
+ return error_stack[-1]
+
+
+# The same mechanism, as a context manager:
+
+@contextmanager
+def local_errors(ignore=False):
+ errors = []
+ error_stack.append(errors)
+ try:
+ yield errors
+ finally:
+ release_errors(ignore=ignore)
+
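+# Usage sketch (hypothetical call site): errors raised inside the block are
+# collected rather than reported, and are dropped when ignore=True:
+#
+#     with local_errors(ignore=True) as errors:
+#         node = expr.analyse_types(env)   # may call error(...)
+#     if errors:
+#         node = None  # fall back to another strategy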
+
+# this module needs a redesign to support parallel cythonisation, but
+# for now, the following works at least in sequential compiler runs
+
+def reset():
+ _warn_once_seen.clear()
+ del error_stack[:]
diff --git a/contrib/tools/cython/Cython/Compiler/ExprNodes.py b/contrib/tools/cython/Cython/Compiler/ExprNodes.py
new file mode 100644
index 0000000000..7b065fcaba
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/ExprNodes.py
@@ -0,0 +1,13719 @@
+#
+# Parse tree nodes for expressions
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
+ CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
+ StringEncoding=object, operator=object, local_errors=object, report_error=object,
+ Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
+ list_type=object, tuple_type=object, set_type=object, dict_type=object,
+ unicode_type=object, str_type=object, bytes_type=object, type_type=object,
+ Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
+ debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
+ bytearray_type=object, slice_type=object, _py_int_types=object,
+ IS_PYTHON3=cython.bint)
+
+import re
+import sys
+import copy
+import os.path
+import operator
+
+from .Errors import (
+ error, warning, InternalError, CompileError, report_error, local_errors)
+from .Code import UtilityCode, TempitaUtilityCode
+from . import StringEncoding
+from . import Naming
+from . import Nodes
+from .Nodes import Node, utility_code_for_imports, analyse_type_annotation
+from . import PyrexTypes
+from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
+ unspecified_type
+from . import TypeSlots
+from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
+ unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
+from . import Builtin
+from . import Symtab
+from .. import Utils
+from .Annotate import AnnotationItem
+from . import Future
+from ..Debugging import print_call_chain
+from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
+ debug_coercion
+from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type,
+ is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran,
+ pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type,
+ pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor)
+from .PyrexTypes import PythranExpr
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ # Python 3
+ basestring = str
+ any_string_type = (bytes, str)
+else:
+ # Python 2
+ any_string_type = (bytes, unicode)
+
+
+if sys.version_info[0] >= 3:
+ IS_PYTHON3 = True
+ _py_int_types = int
+else:
+ IS_PYTHON3 = False
+ _py_int_types = (int, long)
+
+
+class NotConstant(object):
+ _obj = None
+
+ def __new__(cls):
+ if NotConstant._obj is None:
+ NotConstant._obj = super(NotConstant, cls).__new__(cls)
+
+ return NotConstant._obj
+
+ def __repr__(self):
+ return "<NOT CONSTANT>"
+
+not_a_constant = NotConstant()
+constant_value_not_set = object()
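+# NotConstant.__new__ always returns the same cached instance, so identity
+# tests such as `node.constant_result is not_a_constant` are reliable.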
+
+# error messages when coercing from key[0] to key[1]
+coercion_error_dict = {
+ # string related errors
+ (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
+ " This is not portable and requires explicit encoding."),
+ (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
+ (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
+ (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
+ (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
+ " This is not portable to Py3."),
+ (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
+ (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
+ "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
+ (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
+ (str_type, unicode_type): ("str objects do not support coercion to unicode,"
+ " use a unicode string literal instead (u'')"),
+ (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
+ (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
+ (str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
+ "'str' objects do not support coercion to C types (use 'unicode'?)."),
+ (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
+ (PyrexTypes.c_const_char_ptr_type, unicode_type): (
+ "Cannot convert 'char*' to unicode implicitly, decoding required"),
+ (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
+ (PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
+ "Cannot convert 'char*' to unicode implicitly, decoding required"),
+}
+
+def find_coercion_error(type_tuple, default, env):
+ err = coercion_error_dict.get(type_tuple)
+ if err is None:
+ return default
+ elif (env.directives['c_string_encoding'] and
+ any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
+ PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
+ if type_tuple[1].is_pyobject:
+ return default
+ elif env.directives['c_string_encoding'] in ('ascii', 'default'):
+ return default
+ else:
+ return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
+ else:
+ return err
+
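+# e.g. find_coercion_error((unicode_type, bytes_type), None, env) yields the
+# "encoding required" message from the table above; the c_string_encoding
+# branch only kicks in when a C char pointer type is involved (sketch).
+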
+
+def default_str_type(env):
+ return {
+ 'bytes': bytes_type,
+ 'bytearray': bytearray_type,
+ 'str': str_type,
+ 'unicode': unicode_type
+ }.get(env.directives['c_string_type'])
+
+
+def check_negative_indices(*nodes):
+ """
+ Raise a warning on nodes that are known to have negative numeric values.
+ Used to find (potential) bugs inside of "wraparound=False" sections.
+ """
+ for node in nodes:
+ if node is None or (
+ not isinstance(node.constant_result, _py_int_types) and
+ not isinstance(node.constant_result, float)):
+ continue
+ if node.constant_result < 0:
+ warning(node.pos,
+ "the result of using negative indices inside of "
+ "code sections marked as 'wraparound=False' is "
+ "undefined", level=1)
+
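+# e.g. a constant index of -1 used inside a `wraparound=False` section would
+# trigger the warning above (sketch).
+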
+
+def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
+ if not seq_node.is_sequence_constructor:
+ if seq_type is None:
+ seq_type = seq_node.infer_type(env)
+ if seq_type is tuple_type:
+ # tuples are immutable => we can safely follow assignments
+ if seq_node.cf_state and len(seq_node.cf_state) == 1:
+ try:
+ seq_node = seq_node.cf_state[0].rhs
+ except AttributeError:
+ pass
+ if seq_node is not None and seq_node.is_sequence_constructor:
+ if index_node is not None and index_node.has_constant_result():
+ try:
+ item = seq_node.args[index_node.constant_result]
+ except (ValueError, TypeError, IndexError):
+ pass
+ else:
+ return item.infer_type(env)
+ # if we're lucky, all items have the same type
+ item_types = set([item.infer_type(env) for item in seq_node.args])
+ if len(item_types) == 1:
+ return item_types.pop()
+ return None
+
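+# Sketch: for `t = (1, 2.0)`, indexing `t[0]` infers the type of the first
+# item via the constant index, while plain iteration over `t` finds no common
+# item type and returns None.
+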
+
+def make_dedup_key(outer_type, item_nodes):
+ """
+ Recursively generate a deduplication key from a sequence of values.
+ Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example.
+
+ @param outer_type: The type of the outer container.
+ @param item_nodes: A sequence of constant nodes that will be traversed recursively.
+ @return: A tuple that can be used as a dict key for deduplication.
+ """
+ item_keys = [
+ (py_object_type, None, type(None)) if node is None
+ # For sequences and their "mult_factor", see TupleNode.
+ else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor
+ else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice
+ # For constants, look at the Python value type if we don't know the concrete Cython type.
+ else (node.type, node.constant_result,
+ type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result()
+ else None # something we cannot handle => short-circuit below
+ for node in item_nodes
+ ]
+ if None in item_keys:
+ return None
+ return outer_type, tuple(item_keys)
+
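+# Sketch: the tuples (1, 2.0) and (1.0, 2) compare equal in Python, but their
+# dedup keys differ because each item key records the node's Cython type, so
+# the two constants are not merged.
+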
+
+# Returns a block of code to translate the exception,
+# plus a boolean indicating whether to check for Python exceptions.
+def get_exception_handler(exception_value):
+ if exception_value is None:
+ return "__Pyx_CppExn2PyErr();", False
+ elif (exception_value.type == PyrexTypes.c_char_type
+ and exception_value.value == '*'):
+ return "__Pyx_CppExn2PyErr();", True
+ elif exception_value.type.is_pyobject:
+ return (
+ 'try { throw; } catch(const std::exception& exn) {'
+ 'PyErr_SetString(%s, exn.what());'
+ '} catch(...) { PyErr_SetNone(%s); }' % (
+ exception_value.entry.cname,
+ exception_value.entry.cname),
+ False)
+ else:
+ return (
+ '%s(); if (!PyErr_Occurred())'
+ 'PyErr_SetString(PyExc_RuntimeError, '
+ '"Error converting c++ exception.");' % (
+ exception_value.entry.cname),
+ False)
+
+def maybe_check_py_error(code, check_py_exception, pos, nogil):
+ if check_py_exception:
+ if nogil:
+ code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos))
+ else:
+ code.putln(code.error_goto_if("PyErr_Occurred()", pos))
+
+def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil):
+ raise_py_exception, check_py_exception = get_exception_handler(exception_value)
+ code.putln("try {")
+ code.putln("%s" % inside)
+ if py_result:
+ code.putln(code.error_goto_if_null(py_result, pos))
+ maybe_check_py_error(code, check_py_exception, pos, nogil)
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(raise_py_exception)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln("}")
+
+# Used to handle the case where an lvalue expression and an overloaded assignment
+# both have an exception declaration.
+def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code,
+ lhs_exc_val, assign_exc_val, nogil):
+    handle_lhs_exc, lhs_check_py_exc = get_exception_handler(lhs_exc_val)
+ handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val)
+ code.putln("try {")
+ code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
+    maybe_check_py_error(code, lhs_check_py_exc, pos, nogil)
+ code.putln("try {")
+ code.putln("__pyx_local_lvalue = %s;" % rhs_code)
+ maybe_check_py_error(code, assignment_check_py_exc, pos, nogil)
+ # Catch any exception from the overloaded assignment.
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(handle_assignment_exc)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln("}")
+ # Catch any exception from evaluating lhs.
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(handle_lhs_exc)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln('}')
+
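+# Shape of the C code emitted above (sketch, placeholders abbreviated):
+#
+#     try {
+#         <lhs_type> __pyx_local_lvalue = <lhs_code>;
+#         try {
+#             __pyx_local_lvalue = <rhs_code>;
+#         } catch(...) { <raise assignment exception>; goto <error>; }
+#     } catch(...) { <raise lhs exception>; goto <error>; }
+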
+
+class ExprNode(Node):
+ # subexprs [string] Class var holding names of subexpr node attrs
+ # type PyrexType Type of the result
+ # result_code string Code fragment
+ # result_ctype string C type of result_code if different from type
+ # is_temp boolean Result is in a temporary variable
+ # is_sequence_constructor
+ # boolean Is a list or tuple constructor expression
+ # is_starred boolean Is a starred expression (e.g. '*a')
+ # saved_subexpr_nodes
+ # [ExprNode or [ExprNode or None] or None]
+ # Cached result of subexpr_nodes()
+ # use_managed_ref boolean use ref-counted temps/assignments/etc.
+ # result_is_used boolean indicates that the result will be dropped and the
+ # result_code/temp_result can safely be set to None
+ # is_numpy_attribute boolean Is a Numpy module attribute
+ # annotation ExprNode or None PEP526 annotation for names or expressions
+
+ result_ctype = None
+ type = None
+ annotation = None
+ temp_code = None
+ old_temp = None # error checker for multiple frees etc.
+ use_managed_ref = True # can be set by optimisation transforms
+ result_is_used = True
+ is_numpy_attribute = False
+
+ # The Analyse Expressions phase for expressions is split
+ # into two sub-phases:
+ #
+ # Analyse Types
+ # Determines the result type of the expression based
+ # on the types of its sub-expressions, and inserts
+ # coercion nodes into the expression tree where needed.
+ # Marks nodes which will need to have temporary variables
+ # allocated.
+ #
+ # Allocate Temps
+ # Allocates temporary variables where needed, and fills
+ # in the result_code field of each node.
+ #
+ # ExprNode provides some convenience routines which
+ # perform both of the above phases. These should only
+ # be called from statement nodes, and only when no
+ # coercion nodes need to be added around the expression
+ # being analysed. In that case, the above two phases
+ # should be invoked separately.
+ #
+ # Framework code in ExprNode provides much of the common
+ # processing for the various phases. It makes use of the
+ # 'subexprs' class attribute of ExprNodes, which should
+ # contain a list of the names of attributes which can
+ # hold sub-nodes or sequences of sub-nodes.
+ #
+ # The framework makes use of a number of abstract methods.
+ # Their responsibilities are as follows.
+ #
+ # Declaration Analysis phase
+ #
+ # analyse_target_declaration
+ # Called during the Analyse Declarations phase to analyse
+ # the LHS of an assignment or argument of a del statement.
+ # Nodes which cannot be the LHS of an assignment need not
+ # implement it.
+ #
+ # Expression Analysis phase
+ #
+ # analyse_types
+ # - Call analyse_types on all sub-expressions.
+ # - Check operand types, and wrap coercion nodes around
+ # sub-expressions where needed.
+ # - Set the type of this node.
+ # - If a temporary variable will be required for the
+ # result, set the is_temp flag of this node.
+ #
+ # analyse_target_types
+ # Called during the Analyse Types phase to analyse
+ # the LHS of an assignment or argument of a del
+ # statement. Similar responsibilities to analyse_types.
+ #
+ # target_code
+ # Called by the default implementation of allocate_target_temps.
+ # Should return a C lvalue for assigning to the node. The default
+ # implementation calls calculate_result_code.
+ #
+ # check_const
+ # - Check that this node and its subnodes form a
+ # legal constant expression. If so, do nothing,
+ # otherwise call not_const.
+ #
+ # The default implementation of check_const
+ # assumes that the expression is not constant.
+ #
+ # check_const_addr
+ # - Same as check_const, except check that the
+ # expression is a C lvalue whose address is
+ # constant. Otherwise, call addr_not_const.
+ #
+    #       The default implementation of check_const_addr
+ # assumes that the expression is not a constant
+ # lvalue.
+ #
+ # Code Generation phase
+ #
+ # generate_evaluation_code
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Perform the functions of generate_result_code
+ # (see below).
+ # - If result is temporary, call generate_disposal_code
+ # on all sub-expressions.
+ #
+ # A default implementation of generate_evaluation_code
+ # is provided which uses the following abstract methods:
+ #
+ # generate_result_code
+ # - Generate any C statements necessary to calculate
+ # the result of this node from the results of its
+ # sub-expressions.
+ #
+ # calculate_result_code
+ # - Should return a C code fragment evaluating to the
+ # result. This is only called when the result is not
+ # a temporary.
+ #
+ # generate_assignment_code
+ # Called on the LHS of an assignment.
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Generate code to perform the assignment.
+ # - If the assignment absorbed a reference, call
+ # generate_post_assignment_code on the RHS,
+ # otherwise call generate_disposal_code on it.
+ #
+ # generate_deletion_code
+ # Called on an argument of a del statement.
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Generate code to perform the deletion.
+ # - Call generate_disposal_code on all sub-expressions.
+ #
+ #
+
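+    # A minimal sketch of a leaf node following this protocol (hypothetical,
+    # for illustration only):
+    #
+    #     class AnswerNode(AtomicExprNode):
+    #         type = PyrexTypes.c_long_type
+    #         def analyse_types(self, env):
+    #             return self
+    #         def generate_result_code(self, code):
+    #             pass  # the result is a bare C expression
+    #         def calculate_result_code(self):
+    #             return "42"
+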
+ is_sequence_constructor = False
+ is_dict_literal = False
+ is_set_literal = False
+ is_string_literal = False
+ is_attribute = False
+ is_subscript = False
+ is_slice = False
+
+ is_buffer_access = False
+ is_memview_index = False
+ is_memview_slice = False
+ is_memview_broadcast = False
+ is_memview_copy_assignment = False
+
+ saved_subexpr_nodes = None
+ is_temp = False
+ is_target = False
+ is_starred = False
+
+ constant_result = constant_value_not_set
+
+ child_attrs = property(fget=operator.attrgetter('subexprs'))
+
+ def not_implemented(self, method_name):
+ print_call_chain(method_name, "not implemented") ###
+ raise InternalError(
+ "%s.%s not implemented" %
+ (self.__class__.__name__, method_name))
+
+ def is_lvalue(self):
+ return 0
+
+ def is_addressable(self):
+ return self.is_lvalue() and not self.type.is_memoryviewslice
+
+ def is_ephemeral(self):
+ # An ephemeral node is one whose result is in
+ # a Python temporary and we suspect there are no
+ # other references to it. Certain operations are
+ # disallowed on such values, since they are
+ # likely to result in a dangling pointer.
+ return self.type.is_pyobject and self.is_temp
+
+ def subexpr_nodes(self):
+ # Extract a list of subexpression nodes based
+ # on the contents of the subexprs class attribute.
+ nodes = []
+ for name in self.subexprs:
+ item = getattr(self, name)
+ if item is not None:
+ if type(item) is list:
+ nodes.extend(item)
+ else:
+ nodes.append(item)
+ return nodes
+
+ def result(self):
+ if self.is_temp:
+ #if not self.temp_code:
+ # pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
+ # raise RuntimeError("temp result name not set in %s at %r" % (
+ # self.__class__.__name__, pos))
+ return self.temp_code
+ else:
+ return self.calculate_result_code()
+
+ def pythran_result(self, type_=None):
+ if is_pythran_supported_node_or_none(self):
+ return to_pythran(self)
+
+ assert(type_ is not None)
+ return to_pythran(self, type_)
+
+ def is_c_result_required(self):
+ """
+ Subtypes may return False here if result temp allocation can be skipped.
+ """
+ return True
+
+ def result_as(self, type = None):
+ # Return the result code cast to the specified C type.
+ if (self.is_temp and self.type.is_pyobject and
+ type != py_object_type):
+ # Allocated temporaries are always PyObject *, which may not
+ # reflect the actual type (e.g. an extension type)
+ return typecast(type, py_object_type, self.result())
+ return typecast(type, self.ctype(), self.result())
+
+ def py_result(self):
+ # Return the result code cast to PyObject *.
+ return self.result_as(py_object_type)
+
+ def ctype(self):
+ # Return the native C type of the result (i.e. the
+ # C type of the result_code expression).
+ return self.result_ctype or self.type
+
+ def get_constant_c_result_code(self):
+ # Return the constant value of this node as a result code
+ # string, or None if the node is not constant. This method
+ # can be called when the constant result code is required
+ # before the code generation phase.
+ #
+ # The return value is a string that can represent a simple C
+ # value, a constant C name or a constant C expression. If the
+ # node type depends on Python code, this must return None.
+ return None
+
+ def calculate_constant_result(self):
+ # Calculate the constant compile time result value of this
+ # expression and store it in ``self.constant_result``. Does
+ # nothing by default, thus leaving ``self.constant_result``
+ # unknown. If valid, the result can be an arbitrary Python
+ # value.
+ #
+ # This must only be called when it is assured that all
+ # sub-expressions have a valid constant_result value. The
+ # ConstantFolding transform will do this.
+ pass
+
+ def has_constant_result(self):
+ return self.constant_result is not constant_value_not_set and \
+ self.constant_result is not not_a_constant
+
+ def compile_time_value(self, denv):
+ # Return value of compile-time expression, or report error.
+ error(self.pos, "Invalid compile-time expression")
+
+ def compile_time_value_error(self, e):
+ error(self.pos, "Error in compile-time expression: %s: %s" % (
+ e.__class__.__name__, e))
+
+ # ------------- Declaration Analysis ----------------
+
+ def analyse_target_declaration(self, env):
+ error(self.pos, "Cannot assign to or delete this")
+
+ # ------------- Expression Analysis ----------------
+
+ def analyse_const_expression(self, env):
+ # Called during the analyse_declarations phase of a
+ # constant expression. Analyses the expression's type,
+ # checks whether it is a legal const expression,
+ # and determines its value.
+ node = self.analyse_types(env)
+ node.check_const()
+ return node
+
+ def analyse_expressions(self, env):
+ # Convenience routine performing both the Type
+ # Analysis and Temp Allocation phases for a whole
+ # expression.
+ return self.analyse_types(env)
+
+ def analyse_target_expression(self, env, rhs):
+ # Convenience routine performing both the Type
+ # Analysis and Temp Allocation phases for the LHS of
+ # an assignment.
+ return self.analyse_target_types(env)
+
+ def analyse_boolean_expression(self, env):
+ # Analyse expression and coerce to a boolean.
+ node = self.analyse_types(env)
+ bool = node.coerce_to_boolean(env)
+ return bool
+
+ def analyse_temp_boolean_expression(self, env):
+ # Analyse boolean expression and coerce result into
+ # a temporary. This is used when a branch is to be
+ # performed on the result and we won't have an
+ # opportunity to ensure disposal code is executed
+ # afterwards. By forcing the result into a temporary,
+ # we ensure that all disposal has been done by the
+ # time we get the result.
+ node = self.analyse_types(env)
+ return node.coerce_to_boolean(env).coerce_to_simple(env)
+
+ # --------------- Type Inference -----------------
+
+ def type_dependencies(self, env):
+ # Returns the list of entries whose types must be determined
+ # before the type of self can be inferred.
+ if hasattr(self, 'type') and self.type is not None:
+ return ()
+ return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
+
+ def infer_type(self, env):
+ # Attempt to deduce the type of self.
+ # Differs from analyse_types as it avoids unnecessary
+ # analysis of subexpressions, but can assume everything
+ # in self.type_dependencies() has been resolved.
+ if hasattr(self, 'type') and self.type is not None:
+ return self.type
+ elif hasattr(self, 'entry') and self.entry is not None:
+ return self.entry.type
+ else:
+ self.not_implemented("infer_type")
+
+ def nonlocally_immutable(self):
+ # Returns whether this variable is a safe reference, i.e.
+ # can't be modified as part of globals or closures.
+ return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
+
+ def inferable_item_node(self, index=0):
+ """
+ Return a node that represents the (type) result of an indexing operation,
+ e.g. for tuple unpacking or iteration.
+ """
+ return IndexNode(self.pos, base=self, index=IntNode(
+ self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
+
+ # --------------- Type Analysis ------------------
+
+ def analyse_as_module(self, env):
+ # If this node can be interpreted as a reference to a
+ # cimported module, return its scope, else None.
+ return None
+
+ def analyse_as_type(self, env):
+ # If this node can be interpreted as a reference to a
+ # type, return that type, else None.
+ return None
+
+ def analyse_as_extension_type(self, env):
+ # If this node can be interpreted as a reference to an
+ # extension type or builtin type, return its type, else None.
+ return None
+
+ def analyse_types(self, env):
+ self.not_implemented("analyse_types")
+
+ def analyse_target_types(self, env):
+ return self.analyse_types(env)
+
+ def nogil_check(self, env):
+ # By default, any expression based on Python objects is
+ # prevented in nogil environments. Subtypes must override
+ # this if they can work without the GIL.
+ if self.type and self.type.is_pyobject:
+ self.gil_error()
+
+ def gil_assignment_check(self, env):
+ if env.nogil and self.type.is_pyobject:
+ error(self.pos, "Assignment of Python object not allowed without gil")
+
+ def check_const(self):
+ self.not_const()
+ return False
+
+ def not_const(self):
+ error(self.pos, "Not allowed in a constant expression")
+
+ def check_const_addr(self):
+ self.addr_not_const()
+ return False
+
+ def addr_not_const(self):
+ error(self.pos, "Address is not constant")
+
+ # ----------------- Result Allocation -----------------
+
+ def result_in_temp(self):
+ # Return true if result is in a temporary owned by
+ # this node or one of its subexpressions. Overridden
+ # by certain nodes which can share the result of
+ # a subnode.
+ return self.is_temp
+
+ def target_code(self):
+ # Return code fragment for use as LHS of a C assignment.
+ return self.calculate_result_code()
+
+ def calculate_result_code(self):
+ self.not_implemented("calculate_result_code")
+
+# def release_target_temp(self, env):
+# # Release temporaries used by LHS of an assignment.
+# self.release_subexpr_temps(env)
+
+ def allocate_temp_result(self, code):
+ if self.temp_code:
+ raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
+ type = self.type
+ if not type.is_void:
+ if type.is_pyobject:
+ type = PyrexTypes.py_object_type
+ elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
+ self.temp_code = None
+ return
+ self.temp_code = code.funcstate.allocate_temp(
+ type, manage_ref=self.use_managed_ref)
+ else:
+ self.temp_code = None
+
+ def release_temp_result(self, code):
+ if not self.temp_code:
+ if not self.result_is_used:
+ # not used anyway, so ignore if not set up
+ return
+ pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
+ if self.old_temp:
+ raise RuntimeError("temp %s released multiple times in %s at %r" % (
+ self.old_temp, self.__class__.__name__, pos))
+ else:
+ raise RuntimeError("no temp, but release requested in %s at %r" % (
+ self.__class__.__name__, pos))
+ code.funcstate.release_temp(self.temp_code)
+ self.old_temp = self.temp_code
+ self.temp_code = None
+
+ # ---------------- Code Generation -----------------
+
+ def make_owned_reference(self, code):
+ """
+ If result is a pyobject, make sure we own a reference to it.
+ If the result is in a temp, it is already a new reference.
+ """
+ if self.type.is_pyobject and not self.result_in_temp():
+ code.put_incref(self.result(), self.ctype())
+
+ def make_owned_memoryviewslice(self, code):
+ """
+ Make sure we own the reference to this memoryview slice.
+ """
+ if not self.result_in_temp():
+ code.put_incref_memoryviewslice(self.result(),
+ have_gil=self.in_nogil_context)
+
+ def generate_evaluation_code(self, code):
+ # Generate code to evaluate this node and
+ # its sub-expressions, and dispose of any
+ # temporary results of its sub-expressions.
+ self.generate_subexpr_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ if self.is_temp:
+ self.allocate_temp_result(code)
+
+ self.generate_result_code(code)
+ if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
+ # If we are temp we do not need to wait until this node is disposed
+ # before disposing children.
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+ def generate_subexpr_evaluation_code(self, code):
+ for node in self.subexpr_nodes():
+ node.generate_evaluation_code(code)
+
+ def generate_result_code(self, code):
+ self.not_implemented("generate_result_code")
+
+ def generate_disposal_code(self, code):
+ if self.is_temp:
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ # postponed from self.generate_evaluation_code()
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ if self.result():
+ if self.type.is_pyobject:
+ code.put_decref_clear(self.result(), self.ctype())
+ elif self.type.is_memoryviewslice:
+ code.put_xdecref_memoryviewslice(
+ self.result(), have_gil=not self.in_nogil_context)
+ code.putln("%s.memview = NULL;" % self.result())
+ code.putln("%s.data = NULL;" % self.result())
+ else:
+ # Already done if self.is_temp
+ self.generate_subexpr_disposal_code(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ # Generate code to dispose of temporary results
+ # of all sub-expressions.
+ for node in self.subexpr_nodes():
+ node.generate_disposal_code(code)
+
+ def generate_post_assignment_code(self, code):
+ if self.is_temp:
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ # postponed from self.generate_evaluation_code()
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ elif self.type.is_pyobject:
+ code.putln("%s = 0;" % self.result())
+ elif self.type.is_memoryviewslice:
+ code.putln("%s.memview = NULL;" % self.result())
+ code.putln("%s.data = NULL;" % self.result())
+ else:
+ self.generate_subexpr_disposal_code(code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ # Stub method for nodes which are not legal as
+ # the LHS of an assignment. An error will have
+ # been reported earlier.
+ pass
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ # Stub method for nodes that are not legal as
+ # the argument of a del statement. An error
+ # will have been reported earlier.
+ pass
+
+ def free_temps(self, code):
+ if self.is_temp:
+ if not self.type.is_void:
+ self.release_temp_result(code)
+ else:
+ self.free_subexpr_temps(code)
+
+ def free_subexpr_temps(self, code):
+ for sub in self.subexpr_nodes():
+ sub.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ pass
+
+ # ---------------- Annotation ---------------------
+
+ def annotate(self, code):
+ for node in self.subexpr_nodes():
+ node.annotate(code)
+
+ # ----------------- Coercion ----------------------
+
+ def coerce_to(self, dst_type, env):
+ # Coerce the result so that it can be assigned to
+ # something of type dst_type. If processing is necessary,
+ # wraps this node in a coercion node and returns that.
+ # Otherwise, returns this node unchanged.
+ #
+ # This method is called during the analyse_expressions
+ # phase of the src_node's processing.
+ #
+ # Note that subclasses that override this (especially
+ # ConstNodes) must not (re-)set their own .type attribute
+ # here. Since expression nodes may turn up in different
+ # places in the tree (e.g. inside of CloneNodes in cascaded
+ # assignments), this method must return a new node instance
+ # if it changes the type.
+ #
+ src = self
+ src_type = self.type
+
+ if self.check_for_coercion_error(dst_type, env):
+ return self
+
+ used_as_reference = dst_type.is_reference
+ if used_as_reference and not src_type.is_reference:
+ dst_type = dst_type.ref_base_type
+
+ if src_type.is_const:
+ src_type = src_type.const_base_type
+
+ if src_type.is_fused or dst_type.is_fused:
+ # See if we are coercing a fused function to a pointer to a
+ # specialized function
+ if (src_type.is_cfunction and not dst_type.is_fused and
+ dst_type.is_ptr and dst_type.base_type.is_cfunction):
+
+ dst_type = dst_type.base_type
+
+ for signature in src_type.get_all_specialized_function_types():
+ if signature.same_as(dst_type):
+ src.type = signature
+ src.entry = src.type.entry
+ src.entry.used = True
+ return self
+
+ if src_type.is_fused:
+ error(self.pos, "Type is not specialized")
+ elif src_type.is_null_ptr and dst_type.is_ptr:
+ # NULL can be implicitly cast to any pointer type
+ return self
+ else:
+ error(self.pos, "Cannot coerce to a type that is not specialized")
+
+ self.type = error_type
+ return self
+
+ if self.coercion_type is not None:
+ # This is purely for error checking purposes!
+ node = NameNode(self.pos, name='', type=self.coercion_type)
+ node.coerce_to(dst_type, env)
+
+ if dst_type.is_memoryviewslice:
+ from . import MemoryView
+ if not src.type.is_memoryviewslice:
+ if src.type.is_pyobject:
+ src = CoerceToMemViewSliceNode(src, dst_type, env)
+ elif src.type.is_array:
+ src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
+ elif not src_type.is_error:
+ error(self.pos,
+ "Cannot convert '%s' to memoryviewslice" % (src_type,))
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
+
+ error(self.pos, msg % tup)
+
+ elif dst_type.is_pyobject:
+ if not src.type.is_pyobject:
+ if dst_type is bytes_type and src.type.is_int:
+ src = CoerceIntToBytesNode(src, env)
+ else:
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
+ if not src.type.subtype_of(dst_type):
+ if src.constant_result is not None:
+ src = PyTypeTestNode(src, dst_type, env)
+ elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
+ # We let the compiler decide whether this is valid
+ return src
+ elif is_pythran_expr(src.type):
+ if is_pythran_supported_type(dst_type):
+                # Match the case where a pythran expr is assigned to a value, or vice versa.
+ # We let the C++ compiler decide whether this is valid or not!
+ return src
+ # Else, we need to convert the Pythran expression to a Python object
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
+ elif src.type.is_pyobject:
+ if used_as_reference and dst_type.is_cpp_class:
+ warning(
+ self.pos,
+ "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
+ src = CoerceFromPyTypeNode(dst_type, src, env)
+ elif (dst_type.is_complex
+ and src_type != dst_type
+ and dst_type.assignable_from(src_type)):
+ src = CoerceToComplexNode(src, dst_type, env)
+ else: # neither src nor dst are py types
+        # The string comparison is included because it is sufficient
+        # for C types, but Cython gets confused when the same types
+        # are declared in different pxi files.
+        # TODO: Remove this hack and require shared declarations.
+ if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
+ self.fail_assignment(dst_type)
+ return src
+
+ def fail_assignment(self, dst_type):
+ error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
+
+ def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
+ if fail and not default:
+ default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
+ message = find_coercion_error((self.type, dst_type), default, env)
+ if message is not None:
+ error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
+ return True
+ if fail:
+ self.fail_assignment(dst_type)
+ return True
+ return False
+
+ def coerce_to_pyobject(self, env):
+ return self.coerce_to(PyrexTypes.py_object_type, env)
+
+ def coerce_to_boolean(self, env):
+ # Coerce result to something acceptable as
+ # a boolean value.
+
+ # if it's constant, calculate the result now
+ if self.has_constant_result():
+ bool_value = bool(self.constant_result)
+ return BoolNode(self.pos, value=bool_value,
+ constant_result=bool_value)
+
+ type = self.type
+ if type.is_enum or type.is_error:
+ return self
+ elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
+ return CoerceToBooleanNode(self, env)
+ elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"):
+ return SimpleCallNode(
+ self.pos,
+ function=AttributeNode(
+ self.pos, obj=self, attribute=StringEncoding.EncodedString('operator bool')),
+ args=[]).analyse_types(env)
+ elif type.is_ctuple:
+ bool_value = len(type.components) == 0
+ return BoolNode(self.pos, value=bool_value,
+ constant_result=bool_value)
+ else:
+ error(self.pos, "Type '%s' not acceptable as a boolean" % type)
+ return self
+
+ def coerce_to_integer(self, env):
+ # If not already some C integer type, coerce to longint.
+ if self.type.is_int:
+ return self
+ else:
+ return self.coerce_to(PyrexTypes.c_long_type, env)
+
+ def coerce_to_temp(self, env):
+ # Ensure that the result is in a temporary.
+ if self.result_in_temp():
+ return self
+ else:
+ return CoerceToTempNode(self, env)
+
+ def coerce_to_simple(self, env):
+ # Ensure that the result is simple (see is_simple).
+ if self.is_simple():
+ return self
+ else:
+ return self.coerce_to_temp(env)
+
+ def is_simple(self):
+ # A node is simple if its result is something that can
+ # be referred to without performing any operations, e.g.
+ # a constant, local var, C global var, struct member
+ # reference, or temporary.
+ return self.result_in_temp()
+
+ def may_be_none(self):
+ if self.type and not (self.type.is_pyobject or
+ self.type.is_memoryviewslice):
+ return False
+ if self.has_constant_result():
+ return self.constant_result is not None
+ return True
+
+ def as_cython_attribute(self):
+ return None
+
+ def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
+ # Wraps the node in a NoneCheckNode if it is not known to be
+ # not-None (e.g. because it is a Python literal).
+ if self.may_be_none():
+ return NoneCheckNode(self, error, message, format_args)
+ else:
+ return self
+
+ @classmethod
+ def from_node(cls, node, **kwargs):
+ """Instantiate this node class from another node, properly
+ copying over all attributes that one would forget otherwise.
+ """
+ attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
+ for attr_name in attributes:
+ if attr_name in kwargs:
+ continue
+ try:
+ value = getattr(node, attr_name)
+ except AttributeError:
+ pass
+ else:
+ kwargs[attr_name] = value
+ return cls(node.pos, **kwargs)
+
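+# Usage sketch (hypothetical): SomeNode.from_node(old, arg=x) builds a node at
+# old.pos and carries over old's cf_* and constant_result attributes unless
+# they are passed explicitly in kwargs.
+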
+
+class AtomicExprNode(ExprNode):
+ # Abstract base class for expression nodes which have
+ # no sub-expressions.
+
+ subexprs = []
+
+ # Override to optimize -- we know we have no children
+ def generate_subexpr_evaluation_code(self, code):
+ pass
+ def generate_subexpr_disposal_code(self, code):
+ pass
+
+class PyConstNode(AtomicExprNode):
+ # Abstract base class for constant Python values.
+
+ is_literal = 1
+ type = py_object_type
+
+ def is_simple(self):
+ return 1
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ return self
+
+ def calculate_result_code(self):
+ return self.value
+
+ def generate_result_code(self, code):
+ pass
+
+
+class NoneNode(PyConstNode):
+ # The constant value None
+
+ is_none = 1
+ value = "Py_None"
+
+ constant_result = None
+
+ nogil_check = None
+
+ def compile_time_value(self, denv):
+ return None
+
+ def may_be_none(self):
+ return True
+
+ def coerce_to(self, dst_type, env):
+ if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
+ # Catch this error early and loudly.
+ error(self.pos, "Cannot assign None to %s" % dst_type)
+ return super(NoneNode, self).coerce_to(dst_type, env)
+
+
+class EllipsisNode(PyConstNode):
+ # '...' in a subscript list.
+
+ value = "Py_Ellipsis"
+
+ constant_result = Ellipsis
+
+ def compile_time_value(self, denv):
+ return Ellipsis
+
+
+class ConstNode(AtomicExprNode):
+ # Abstract base type for literal constant nodes.
+ #
+ # value string C code fragment
+
+ is_literal = 1
+ nogil_check = None
+
+ def is_simple(self):
+ return 1
+
+ def nonlocally_immutable(self):
+ return 1
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ return self # Types are held in class variables
+
+ def check_const(self):
+ return True
+
+ def get_constant_c_result_code(self):
+ return self.calculate_result_code()
+
+ def calculate_result_code(self):
+ return str(self.value)
+
+ def generate_result_code(self, code):
+ pass
+
+
+class BoolNode(ConstNode):
+ type = PyrexTypes.c_bint_type
+ # The constant value True or False
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def compile_time_value(self, denv):
+ return self.value
+
+ def calculate_result_code(self):
+ if self.type.is_pyobject:
+ return self.value and 'Py_True' or 'Py_False'
+ else:
+ return str(int(self.value))
+
+ def coerce_to(self, dst_type, env):
+ if dst_type == self.type:
+ return self
+ if dst_type is py_object_type and self.type is Builtin.bool_type:
+ return self
+ if dst_type.is_pyobject and self.type.is_int:
+ return BoolNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=Builtin.bool_type)
+ if dst_type.is_int and self.type.is_pyobject:
+ return BoolNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=PyrexTypes.c_bint_type)
+ return ConstNode.coerce_to(self, dst_type, env)
+
+
+class NullNode(ConstNode):
+ type = PyrexTypes.c_null_ptr_type
+ value = "NULL"
+ constant_result = 0
+
+ def get_constant_c_result_code(self):
+ return self.value
+
+
+class CharNode(ConstNode):
+ type = PyrexTypes.c_char_type
+
+ def calculate_constant_result(self):
+ self.constant_result = ord(self.value)
+
+ def compile_time_value(self, denv):
+ return ord(self.value)
+
+ def calculate_result_code(self):
+ return "'%s'" % StringEncoding.escape_char(self.value)
+
+
+class IntNode(ConstNode):
+
+ # unsigned "" or "U"
+ # longness "" or "L" or "LL"
+ # is_c_literal True/False/None creator considers this a C integer literal
+
+ unsigned = ""
+ longness = ""
+ is_c_literal = None # unknown
+
+ def __init__(self, pos, **kwds):
+ ExprNode.__init__(self, pos, **kwds)
+ if 'type' not in kwds:
+ self.type = self.find_suitable_type_for_value()
+
+ def find_suitable_type_for_value(self):
+ if self.constant_result is constant_value_not_set:
+ try:
+ self.calculate_constant_result()
+ except ValueError:
+ pass
+ # we ignore 'is_c_literal = True' and instead map signed 32bit
+ # integers as C long values
+ if self.is_c_literal or \
+ not self.has_constant_result() or \
+ self.unsigned or self.longness == 'LL':
+ # clearly a C literal
+ rank = (self.longness == 'LL') and 2 or 1
+ suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
+ if self.type:
+ suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
+ else:
+ # C literal or Python literal - split at 32bit boundary
+ if -2**31 <= self.constant_result < 2**31:
+ if self.type and self.type.is_int:
+ suitable_type = self.type
+ else:
+ suitable_type = PyrexTypes.c_long_type
+ else:
+ suitable_type = PyrexTypes.py_object_type
+ return suitable_type
+
+ def coerce_to(self, dst_type, env):
+ if self.type is dst_type:
+ return self
+ elif dst_type.is_float:
+ if self.has_constant_result():
+ return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
+ constant_result=float(self.constant_result))
+ else:
+ return FloatNode(self.pos, value=self.value, type=dst_type,
+ constant_result=not_a_constant)
+ if dst_type.is_numeric and not dst_type.is_complex:
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ type=dst_type, is_c_literal=True,
+ unsigned=self.unsigned, longness=self.longness)
+ return node
+ elif dst_type.is_pyobject:
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ type=PyrexTypes.py_object_type, is_c_literal=False,
+ unsigned=self.unsigned, longness=self.longness)
+ else:
+ # FIXME: not setting the type here to keep it working with
+ # complex numbers. Should they be special cased?
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ unsigned=self.unsigned, longness=self.longness)
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return ConstNode.coerce_to(node, dst_type, env)
+
+ def coerce_to_boolean(self, env):
+ return IntNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=PyrexTypes.c_bint_type,
+ unsigned=self.unsigned, longness=self.longness)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ # pre-allocate a Python version of the number
+ plain_integer_string = str(Utils.str_to_number(self.value))
+ self.result_code = code.get_py_int(plain_integer_string, self.longness)
+ else:
+ self.result_code = self.get_constant_c_result_code()
+
+ def get_constant_c_result_code(self):
+ unsigned, longness = self.unsigned, self.longness
+ literal = self.value_as_c_integer_string()
+ if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
+ # negative decimal literal => guess longness from type to prevent wrap-around
+ if self.type.rank >= PyrexTypes.c_longlong_type.rank:
+ longness = 'LL'
+ elif self.type.rank >= PyrexTypes.c_long_type.rank:
+ longness = 'L'
+ return literal + unsigned + longness
+
+ def value_as_c_integer_string(self):
+ value = self.value
+ if len(value) <= 2:
+ # too short to go wrong (and simplifies code below)
+ return value
+ neg_sign = ''
+ if value[0] == '-':
+ neg_sign = '-'
+ value = value[1:]
+ if value[0] == '0':
+            literal_type = value[1]  # 'o'/'O', 'b'/'B', 'x'/'X' or a digit
+ # 0x123 hex literals and 0123 octal literals work nicely in C
+ # but C-incompatible Py3 oct/bin notations need conversion
+ if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
+ # negative hex/octal literal => prevent C compiler from using
+ # unsigned integer types by converting to decimal (see C standard 6.4.4.1)
+ value = str(Utils.str_to_number(value))
+ elif literal_type in 'oO':
+ value = '0' + value[2:] # '0o123' => '0123'
+ elif literal_type in 'bB':
+ value = str(int(value[2:], 2))
+ elif value.isdigit() and not self.unsigned and not self.longness:
+ if not neg_sign:
+ # C compilers do not consider unsigned types for decimal literals,
+ # but they do for hex (see C standard 6.4.4.1)
+ value = '0x%X' % int(value)
+ return neg_sign + value
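+
+    # Sketch of the rewrites above: '-0o17' -> '-15', '0b101' -> '5', and a
+    # plain unsuffixed '255' -> '0xFF'; literals like '0x1F' pass through.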
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def calculate_constant_result(self):
+ self.constant_result = Utils.str_to_number(self.value)
+
+ def compile_time_value(self, denv):
+ return Utils.str_to_number(self.value)
+
+class FloatNode(ConstNode):
+ type = PyrexTypes.c_double_type
+
+ def calculate_constant_result(self):
+ self.constant_result = float(self.value)
+
+ def compile_time_value(self, denv):
+ return float(self.value)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject and self.type.is_float:
+ return FloatNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=Builtin.float_type)
+ if dst_type.is_float and self.type.is_pyobject:
+ return FloatNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=dst_type)
+ return ConstNode.coerce_to(self, dst_type, env)
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def get_constant_c_result_code(self):
+ strval = self.value
+ assert isinstance(strval, basestring)
+ cmpval = repr(float(strval))
+ if cmpval == 'nan':
+ return "(Py_HUGE_VAL * 0)"
+ elif cmpval == 'inf':
+ return "Py_HUGE_VAL"
+ elif cmpval == '-inf':
+ return "(-Py_HUGE_VAL)"
+ else:
+ return strval
+
+ def generate_evaluation_code(self, code):
+ c_value = self.get_constant_c_result_code()
+ if self.type.is_pyobject:
+ self.result_code = code.get_py_float(self.value, c_value)
+ else:
+ self.result_code = c_value
+
+
+def _analyse_name_as_type(name, pos, env):
+ type = PyrexTypes.parse_basic_type(name)
+ if type is not None:
+ return type
+
+ global_entry = env.global_scope().lookup(name)
+ if global_entry and global_entry.type and (
+ global_entry.type.is_extension_type
+ or global_entry.type.is_struct_or_union
+ or global_entry.type.is_builtin_type
+ or global_entry.type.is_cpp_class):
+ return global_entry.type
+
+ from .TreeFragment import TreeFragment
+ with local_errors(ignore=True):
+ pos = (pos[0], pos[1], pos[2]-7)
+ try:
+ declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
+ except CompileError:
+ pass
+ else:
+ sizeof_node = declaration.root.stats[0].expr
+ if isinstance(sizeof_node, SizeofTypeNode):
+ sizeof_node = sizeof_node.analyse_types(env)
+ if isinstance(sizeof_node, SizeofTypeNode):
+ return sizeof_node.arg_type
+ return None
+
+
+class BytesNode(ConstNode):
+ # A char* or bytes literal
+ #
+ # value BytesLiteral
+
+ is_string_literal = True
+ # start off as Python 'bytes' to support len() in O(1)
+ type = bytes_type
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def as_sliced_node(self, start, stop, step=None):
+ value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
+ return BytesNode(self.pos, value=value, constant_result=value)
+
+ def compile_time_value(self, denv):
+ return self.value.byteencode()
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
+
+ def can_coerce_to_char_literal(self):
+ return len(self.value) == 1
+
+ def coerce_to_boolean(self, env):
+ # This is special because testing a C char* for truth directly
+ # would yield the wrong result.
+ bool_value = bool(self.value)
+ return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
+
+ def coerce_to(self, dst_type, env):
+ if self.type == dst_type:
+ return self
+ if dst_type.is_int:
+ if not self.can_coerce_to_char_literal():
+ error(self.pos, "Only single-character string literals can be coerced into ints.")
+ return self
+ if dst_type.is_unicode_char:
+ error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
+ return self
+ return CharNode(self.pos, value=self.value,
+ constant_result=ord(self.value))
+
+ node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
+ if dst_type.is_pyobject:
+ if dst_type in (py_object_type, Builtin.bytes_type):
+ node.type = Builtin.bytes_type
+ else:
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return node
+ elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
+ node.type = dst_type
+ return node
+ elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
+ node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
+ else PyrexTypes.c_char_ptr_type)
+ return CastNode(node, dst_type)
+ elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
+ # Exclude the case of passing a C string literal into a non-const C++ string.
+ if not dst_type.is_cpp_class or dst_type.is_const:
+ node.type = dst_type
+ return node
+
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return ConstNode.coerce_to(node, dst_type, env)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ result = code.get_py_string_const(self.value)
+ elif self.type.is_const:
+ result = code.get_string_const(self.value)
+ else:
+ # not const => use plain C string literal and cast to mutable type
+ literal = self.value.as_c_string_literal()
+ # C++ may require a cast
+ result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
+ self.result_code = result
+
+ def get_constant_c_result_code(self):
+ return None # FIXME
+
+ def calculate_result_code(self):
+ return self.result_code
+
+
+class UnicodeNode(ConstNode):
+ # A Py_UNICODE* or unicode literal
+ #
+ # value EncodedString
+ # bytes_value BytesLiteral the literal parsed as bytes string
+ # ('-3' unicode literals only)
+
+ is_string_literal = True
+ bytes_value = None
+ type = unicode_type
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.value, self.pos, env)
+
+ def as_sliced_node(self, start, stop, step=None):
+ if StringEncoding.string_contains_surrogates(self.value[:stop]):
+ # this is unsafe as it may give different results
+ # in different runtimes
+ return None
+ value = StringEncoding.EncodedString(self.value[start:stop:step])
+ value.encoding = self.value.encoding
+ if self.bytes_value is not None:
+ bytes_value = StringEncoding.bytes_literal(
+ self.bytes_value[start:stop:step], self.bytes_value.encoding)
+ else:
+ bytes_value = None
+ return UnicodeNode(
+ self.pos, value=value, bytes_value=bytes_value,
+ constant_result=value)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type is self.type:
+ pass
+ elif dst_type.is_unicode_char:
+ if not self.can_coerce_to_char_literal():
+ error(self.pos,
+ "Only single-character Unicode string literals or "
+ "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
+ return self
+ int_value = ord(self.value)
+ return IntNode(self.pos, type=dst_type, value=str(int_value),
+ constant_result=int_value)
+ elif not dst_type.is_pyobject:
+ if dst_type.is_string and self.bytes_value is not None:
+ # special case: '-3' enforced unicode literal used in a
+ # C char* context
+ return BytesNode(self.pos, value=self.bytes_value
+ ).coerce_to(dst_type, env)
+ if dst_type.is_pyunicode_ptr:
+ node = UnicodeNode(self.pos, value=self.value)
+ node.type = dst_type
+ return node
+ error(self.pos,
+ "Unicode literals do not support coercion to C types other "
+ "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
+ "(for strings).")
+ elif dst_type not in (py_object_type, Builtin.basestring_type):
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return self
+
+ def can_coerce_to_char_literal(self):
+ return len(self.value) == 1
+ ## or (len(self.value) == 2
+ ## and (0xD800 <= self.value[0] <= 0xDBFF)
+ ## and (0xDC00 <= self.value[1] <= 0xDFFF))
+
+ def coerce_to_boolean(self, env):
+ bool_value = bool(self.value)
+ return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
+
+ def contains_surrogates(self):
+ return StringEncoding.string_contains_surrogates(self.value)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ # FIXME: this should go away entirely!
+ # Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2,
+ # Py2 can generate different code from Py3 here. Let's hope we get away with claiming that
+            # the processing of surrogate pairs in code was always ambiguous and led to different results
+            # on 16/32 bit Unicode platforms.

+ if StringEncoding.string_contains_lone_surrogates(self.value):
+ # lone (unpaired) surrogates are not really portable and cannot be
+ # decoded by the UTF-8 codec in Py3.3
+ self.result_code = code.get_py_const(py_object_type, 'ustring')
+ data_cname = code.get_string_const(
+ StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
+ const_code = code.get_cached_constants_writer(self.result_code)
+ if const_code is None:
+ return # already initialised
+ const_code.mark_pos(self.pos)
+ const_code.putln(
+ "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
+ self.result_code,
+ data_cname,
+ data_cname,
+ const_code.error_goto_if_null(self.result_code, self.pos)))
+ const_code.put_error_if_neg(
+ self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
+ else:
+ self.result_code = code.get_py_string_const(self.value)
+ else:
+ self.result_code = code.get_pyunicode_ptr_const(self.value)
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ return self.value
+
+
+class StringNode(PyConstNode):
+ # A Python str object, i.e. a byte string in Python 2.x and a
+ # unicode string in Python 3.x
+ #
+ # value BytesLiteral (or EncodedString with ASCII content)
+ # unicode_value EncodedString or None
+ # is_identifier boolean
+
+ type = str_type
+ is_string_literal = True
+ is_identifier = None
+ unicode_value = None
+
+ def calculate_constant_result(self):
+ if self.unicode_value is not None:
+ # only the Unicode value is portable across Py2/3
+ self.constant_result = self.unicode_value
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
+
+ def as_sliced_node(self, start, stop, step=None):
+ value = type(self.value)(self.value[start:stop:step])
+ value.encoding = self.value.encoding
+ if self.unicode_value is not None:
+ if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
+ # this is unsafe as it may give different results in different runtimes
+ return None
+ unicode_value = StringEncoding.EncodedString(
+ self.unicode_value[start:stop:step])
+ else:
+ unicode_value = None
+ return StringNode(
+ self.pos, value=value, unicode_value=unicode_value,
+ constant_result=value, is_identifier=self.is_identifier)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
+# if dst_type is Builtin.bytes_type:
+# # special case: bytes = 'str literal'
+# return BytesNode(self.pos, value=self.value)
+ if not dst_type.is_pyobject:
+ return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
+ if dst_type is not Builtin.basestring_type:
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return self
+
+ def can_coerce_to_char_literal(self):
+ return not self.is_identifier and len(self.value) == 1
+
+ def generate_evaluation_code(self, code):
+ self.result_code = code.get_py_string_const(
+ self.value, identifier=self.is_identifier, is_str=True,
+ unicode_value=self.unicode_value)
+
+ def get_constant_c_result_code(self):
+ return None
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ if self.value.is_unicode:
+ return self.value
+ if not IS_PYTHON3:
+ # use plain str/bytes object in Py2
+ return self.value.byteencode()
+ # in Py3, always return a Unicode string
+ if self.unicode_value is not None:
+ return self.unicode_value
+ return self.value.decode('iso8859-1')
+
+
+class IdentifierStringNode(StringNode):
+ # A special str value that represents an identifier (bytes in Py2,
+ # unicode in Py3).
+ is_identifier = True
+
+
+class ImagNode(AtomicExprNode):
+ # Imaginary number literal
+ #
+ # value string imaginary part (float value)
+
+ type = PyrexTypes.c_double_complex_type
+
+ def calculate_constant_result(self):
+ self.constant_result = complex(0.0, float(self.value))
+
+ def compile_time_value(self, denv):
+ return complex(0.0, float(self.value))
+
+ def analyse_types(self, env):
+ self.type.create_declaration_utility_code(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def coerce_to(self, dst_type, env):
+ if self.type is dst_type:
+ return self
+ node = ImagNode(self.pos, value=self.value)
+ if dst_type.is_pyobject:
+ node.is_temp = 1
+ node.type = Builtin.complex_type
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return AtomicExprNode.coerce_to(node, dst_type, env)
+
+ gil_message = "Constructing complex number"
+
+ def calculate_result_code(self):
+ if self.type.is_pyobject:
+ return self.result()
+ else:
+ return "%s(0, %r)" % (self.type.from_parts, float(self.value))
+
+ def generate_result_code(self, code):
+ if self.type.is_pyobject:
+ code.putln(
+ "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
+ self.result(),
+ float(self.value),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class NewExprNode(AtomicExprNode):
+
+ # C++ new statement
+ #
+ # cppclass node c++ class to create
+
+ type = None
+
+ def infer_type(self, env):
+ type = self.cppclass.analyse_as_type(env)
+ if type is None or not type.is_cpp_class:
+ error(self.pos, "new operator can only be applied to a C++ class")
+ self.type = error_type
+ return
+ self.cpp_check(env)
+ constructor = type.get_constructor(self.pos)
+ self.class_type = type
+ self.entry = constructor
+ self.type = constructor.type
+ return self.type
+
+ def analyse_types(self, env):
+ if self.type is None:
+ self.infer_type(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ pass
+
+ def calculate_result_code(self):
+ return "new " + self.class_type.empty_declaration_code()
+
+
+class NameNode(AtomicExprNode):
+ # Reference to a local or global variable name.
+ #
+ # name string Python name of the variable
+ # entry Entry Symbol table entry
+ # type_entry Entry For extension type names, the original type entry
+ # cf_is_null boolean Is uninitialized before this node
+ # cf_maybe_null boolean Maybe uninitialized before this node
+ # allow_null boolean Don't raise UnboundLocalError
+ # nogil boolean Whether it is used in a nogil context
+
+ is_name = True
+ is_cython_module = False
+ cython_attribute = None
+ lhs_of_first_assignment = False # TODO: remove me
+ is_used_as_rvalue = 0
+ entry = None
+ type_entry = None
+ cf_maybe_null = True
+ cf_is_null = False
+ allow_null = False
+ nogil = False
+ inferred_type = None
+
+ def as_cython_attribute(self):
+ return self.cython_attribute
+
+ def type_dependencies(self, env):
+ if self.entry is None:
+ self.entry = env.lookup(self.name)
+ if self.entry is not None and self.entry.type.is_unspecified:
+ return (self,)
+ else:
+ return ()
+
+ def infer_type(self, env):
+ if self.entry is None:
+ self.entry = env.lookup(self.name)
+ if self.entry is None or self.entry.type is unspecified_type:
+ if self.inferred_type is not None:
+ return self.inferred_type
+ return py_object_type
+ elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
+ self.name == self.entry.type.name:
+ # Unfortunately the type attribute of type objects
+ # is used for the pointer to the type they represent.
+ return type_type
+ elif self.entry.type.is_cfunction:
+ if self.entry.scope.is_builtin_scope:
+ # special case: optimised builtin functions must be treated as Python objects
+ return py_object_type
+ else:
+ # special case: referring to a C function must return its pointer
+ return PyrexTypes.CPtrType(self.entry.type)
+ else:
+ # If entry is inferred as pyobject it's safe to use local
+ # NameNode's inferred_type.
+ if self.entry.type.is_pyobject and self.inferred_type:
+                # overflow may happen if the inferred type is an integer
+ if not (self.inferred_type.is_int and self.entry.might_overflow):
+ return self.inferred_type
+ return self.entry.type
+
+ def compile_time_value(self, denv):
+ try:
+ return denv.lookup(self.name)
+ except KeyError:
+ error(self.pos, "Compile-time name '%s' not defined" % self.name)
+
+ def get_constant_c_result_code(self):
+ if not self.entry or self.entry.type.is_pyobject:
+ return None
+ return self.entry.cname
+
+ def coerce_to(self, dst_type, env):
+ # If coercing to a generic pyobject and this is a builtin
+ # C function with a Python equivalent, manufacture a NameNode
+ # referring to the Python builtin.
+ #print "NameNode.coerce_to:", self.name, dst_type ###
+ if dst_type is py_object_type:
+ entry = self.entry
+ if entry and entry.is_cfunction:
+ var_entry = entry.as_variable
+ if var_entry:
+ if var_entry.is_builtin and var_entry.is_const:
+ var_entry = env.declare_builtin(var_entry.name, self.pos)
+ node = NameNode(self.pos, name = self.name)
+ node.entry = var_entry
+ node.analyse_rvalue_entry(env)
+ return node
+
+ return super(NameNode, self).coerce_to(dst_type, env)
+
+ def declare_from_annotation(self, env, as_target=False):
+ """Implements PEP 526 annotation typing in a fairly relaxed way.
+
+ Annotations are ignored for global variables, Python class attributes and already declared variables.
+ String literals are allowed and ignored.
+ The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
+ """
+ if not env.directives['annotation_typing']:
+ return
+ if env.is_module_scope or env.is_py_class_scope:
+ # annotations never create global cdef names and Python classes don't support them anyway
+ return
+ name = self.name
+ if self.entry or env.lookup_here(name) is not None:
+ # already declared => ignore annotation
+ return
+
+ annotation = self.annotation
+ if annotation.is_string_literal:
+ # name: "description" => not a type, but still a declared variable or attribute
+ atype = None
+ else:
+ _, atype = analyse_type_annotation(annotation, env)
+ if atype is None:
+ atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
+ self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
+ self.entry.annotation = annotation
+
+ def analyse_as_module(self, env):
+ # Try to interpret this as a reference to a cimported module.
+ # Returns the module scope, or None.
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and entry.as_module:
+ return entry.as_module
+ return None
+
+ def analyse_as_type(self, env):
+ if self.cython_attribute:
+ type = PyrexTypes.parse_basic_type(self.cython_attribute)
+ else:
+ type = PyrexTypes.parse_basic_type(self.name)
+ if type:
+ return type
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and entry.is_type:
+ return entry.type
+ else:
+ return None
+
+ def analyse_as_extension_type(self, env):
+ # Try to interpret this as a reference to an extension type.
+ # Returns the extension type, or None.
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and entry.is_type:
+ if entry.type.is_extension_type or entry.type.is_builtin_type:
+ return entry.type
+ return None
+
+ def analyse_target_declaration(self, env):
+ if not self.entry:
+ self.entry = env.lookup_here(self.name)
+ if not self.entry and self.annotation is not None:
+ # name : type = ...
+ self.declare_from_annotation(env, as_target=True)
+ if not self.entry:
+ if env.directives['warn.undeclared']:
+ warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
+ if env.directives['infer_types'] != False:
+ type = unspecified_type
+ else:
+ type = py_object_type
+ self.entry = env.declare_var(self.name, type, self.pos)
+ if self.entry.is_declared_generic:
+ self.result_ctype = py_object_type
+ if self.entry.as_module:
+ # cimported modules namespace can shadow actual variables
+ self.entry.is_variable = 1
+
+ def analyse_types(self, env):
+ self.initialized_check = env.directives['initializedcheck']
+ entry = self.entry
+ if entry is None:
+ entry = env.lookup(self.name)
+ if not entry:
+ entry = env.declare_builtin(self.name, self.pos)
+ if entry and entry.is_builtin and entry.is_const:
+ self.is_literal = True
+ if not entry:
+ self.type = PyrexTypes.error_type
+ return self
+ self.entry = entry
+ entry.used = 1
+ if entry.type.is_buffer:
+ from . import Buffer
+ Buffer.used_buffer_aux_vars(entry)
+ self.analyse_rvalue_entry(env)
+ return self
+
+ def analyse_target_types(self, env):
+ self.analyse_entry(env, is_target=True)
+
+ entry = self.entry
+ if entry.is_cfunction and entry.as_variable:
+ # FIXME: unify "is_overridable" flags below
+ if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction:
+ # We need this for assigning to cpdef names and for the fused 'def' TreeFragment
+ entry = self.entry = entry.as_variable
+ self.type = entry.type
+
+ if self.type.is_const:
+ error(self.pos, "Assignment to const '%s'" % self.name)
+ if self.type.is_reference:
+ error(self.pos, "Assignment to reference '%s'" % self.name)
+ if not self.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
+ self.type = PyrexTypes.error_type
+ entry.used = 1
+ if entry.type.is_buffer:
+ from . import Buffer
+ Buffer.used_buffer_aux_vars(entry)
+ return self
+
+ def analyse_rvalue_entry(self, env):
+ #print "NameNode.analyse_rvalue_entry:", self.name ###
+ #print "Entry:", self.entry.__dict__ ###
+ self.analyse_entry(env)
+ entry = self.entry
+
+ if entry.is_declared_generic:
+ self.result_ctype = py_object_type
+
+ if entry.is_pyglobal or entry.is_builtin:
+ if entry.is_builtin and entry.is_const:
+ self.is_temp = 0
+ else:
+ self.is_temp = 1
+
+ self.is_used_as_rvalue = 1
+ elif entry.type.is_memoryviewslice:
+ self.is_temp = False
+ self.is_used_as_rvalue = True
+ self.use_managed_ref = True
+ return self
+
+ def nogil_check(self, env):
+ self.nogil = True
+ if self.is_used_as_rvalue:
+ entry = self.entry
+ if entry.is_builtin:
+ if not entry.is_const: # cached builtins are ok
+ self.gil_error()
+ elif entry.is_pyglobal:
+ self.gil_error()
+
+ gil_message = "Accessing Python global or builtin"
+
+ def analyse_entry(self, env, is_target=False):
+ #print "NameNode.analyse_entry:", self.name ###
+ self.check_identifier_kind()
+ entry = self.entry
+ type = entry.type
+ if (not is_target and type.is_pyobject and self.inferred_type and
+ self.inferred_type.is_builtin_type):
+ # assume that type inference is smarter than the static entry
+ type = self.inferred_type
+ self.type = type
+
+ def check_identifier_kind(self):
+ # Check that this is an appropriate kind of name for use in an
+ # expression. Also finds the variable entry associated with
+ # an extension type.
+ entry = self.entry
+ if entry.is_type and entry.type.is_extension_type:
+ self.type_entry = entry
+ if entry.is_type and entry.type.is_enum:
+ py_entry = Symtab.Entry(self.name, None, py_object_type)
+ py_entry.is_pyglobal = True
+ py_entry.scope = self.entry.scope
+ self.entry = py_entry
+ elif not (entry.is_const or entry.is_variable or
+ entry.is_builtin or entry.is_cfunction or
+ entry.is_cpp_class):
+ if self.entry.as_variable:
+ self.entry = self.entry.as_variable
+ elif not self.is_cython_module:
+ error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
+
+ def is_cimported_module_without_shadow(self, env):
+ if self.is_cython_module or self.cython_attribute:
+ return False
+ entry = self.entry or env.lookup(self.name)
+ return entry.as_module and not entry.is_variable
+
+ def is_simple(self):
+ # If it's not a C variable, it'll be in a temp.
+ return 1
+
+ def may_be_none(self):
+ if self.cf_state and self.type and (self.type.is_pyobject or
+ self.type.is_memoryviewslice):
+            # guard against infinite recursion on self-dependencies
+            if getattr(self, '_none_checking', False):
+                # self-dependency - either this node receives a None
+                # value from *another* node, or it cannot reference
+                # None at this point => safe to assume "not None"
+ return False
+ self._none_checking = True
+ # evaluate control flow state to see if there were any
+ # potential None values assigned to the node so far
+ may_be_none = False
+ for assignment in self.cf_state:
+ if assignment.rhs.may_be_none():
+ may_be_none = True
+ break
+ del self._none_checking
+ return may_be_none
+ return super(NameNode, self).may_be_none()
+
+ def nonlocally_immutable(self):
+ if ExprNode.nonlocally_immutable(self):
+ return True
+ entry = self.entry
+ if not entry or entry.in_closure:
+ return False
+ return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
+
+ def calculate_target_results(self, env):
+ pass
+
+ def check_const(self):
+ entry = self.entry
+ if entry is not None and not (
+ entry.is_const or
+ entry.is_cfunction or
+ entry.is_builtin or
+ entry.type.is_const):
+ self.not_const()
+ return False
+ return True
+
+ def check_const_addr(self):
+ entry = self.entry
+ if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
+ self.addr_not_const()
+ return False
+ return True
+
+ def is_lvalue(self):
+ return (
+ self.entry.is_variable and
+ not self.entry.is_readonly
+ ) or (
+ self.entry.is_cfunction and
+ self.entry.is_overridable
+ )
+
+ def is_addressable(self):
+ return self.entry.is_variable and not self.type.is_memoryviewslice
+
+ def is_ephemeral(self):
+ # Name nodes are never ephemeral, even if the
+ # result is in a temporary.
+ return 0
+
+ def calculate_result_code(self):
+ entry = self.entry
+ if not entry:
+ return "<error>" # There was an error earlier
+ return entry.cname
+
+ def generate_result_code(self, code):
+ assert hasattr(self, 'entry')
+ entry = self.entry
+ if entry is None:
+ return # There was an error earlier
+ if entry.utility_code:
+ code.globalstate.use_utility_code(entry.utility_code)
+ if entry.is_builtin and entry.is_const:
+ return # Lookup already cached
+ elif entry.is_pyclass_attr:
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ if entry.is_builtin:
+ namespace = Naming.builtins_cname
+ else: # entry.is_pyglobal
+ namespace = entry.scope.namespace_cname
+ if not self.cf_is_null:
+ code.putln(
+ '%s = PyObject_GetItem(%s, %s);' % (
+ self.result(),
+ namespace,
+ interned_cname))
+ code.putln('if (unlikely(!%s)) {' % self.result())
+ code.putln('PyErr_Clear();')
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetModuleGlobalName(%s, %s);' % (
+ self.result(),
+ interned_cname))
+ if not self.cf_is_null:
+ code.putln("}")
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ code.put_gotref(self.py_result())
+
+ elif entry.is_builtin and not entry.scope.is_module_scope:
+ # known builtin
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
+ code.putln(
+ '%s = __Pyx_GetBuiltinName(%s); %s' % (
+ self.result(),
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
+ # name in class body, global name or unknown builtin
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ if entry.scope.is_module_scope:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetModuleGlobalName(%s, %s); %s' % (
+ self.result(),
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ # FIXME: is_pyglobal is also used for class namespace
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetNameInClass(%s, %s, %s); %s' % (
+ self.result(),
+ entry.scope.namespace_cname,
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
+ # Raise UnboundLocalError for objects and memoryviewslices
+ raise_unbound = (
+ (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
+ null_code = entry.type.check_for_null_code(entry.cname)
+
+ memslice_check = entry.type.is_memoryviewslice and self.initialized_check
+
+ if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
+ code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ #print "NameNode.generate_assignment_code:", self.name ###
+ entry = self.entry
+ if entry is None:
+ return # There was an error earlier
+
+ if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
+ and not self.lhs_of_first_assignment and not rhs.in_module_scope):
+ error(self.pos, "Literal list must be assigned to pointer at time of declaration")
+
+        # is_pyglobal seems to be True for module-level globals only.
+ # We use this to access class->tp_dict if necessary.
+ if entry.is_pyglobal:
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ namespace = self.entry.scope.namespace_cname
+ if entry.is_member:
+ # if the entry is a member we have to cheat: SetAttr does not work
+ # on types, so we create a descriptor which is then added to tp_dict
+ setter = 'PyDict_SetItem'
+ namespace = '%s->tp_dict' % namespace
+ elif entry.scope.is_module_scope:
+ setter = 'PyDict_SetItem'
+ namespace = Naming.moddict_cname
+ elif entry.is_pyclass_attr:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
+ setter = '__Pyx_SetNameInClass'
+ else:
+ assert False, repr(entry)
+ code.put_error_if_neg(
+ self.pos,
+ '%s(%s, %s, %s)' % (
+ setter,
+ namespace,
+ interned_cname,
+ rhs.py_result()))
+ if debug_disposal_code:
+ print("NameNode.generate_assignment_code:")
+ print("...generating disposal code for %s" % rhs)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ if entry.is_member:
+ # in Py2.6+, we need to invalidate the method cache
+ code.putln("PyType_Modified(%s);" %
+ entry.scope.parent_type.typeptr_cname)
+ else:
+ if self.type.is_memoryviewslice:
+ self.generate_acquire_memoryviewslice(rhs, code)
+
+ elif self.type.is_buffer:
+ # Generate code for doing the buffer release/acquisition.
+ # This might raise an exception in which case the assignment (done
+ # below) will not happen.
+ #
+            # The reason this is not in a typetest-like node is that the
+            # variables that the acquired buffer info is stored to are
+            # allocated per entry and coupled with it.
+ self.generate_acquire_buffer(rhs, code)
+ assigned = False
+ if self.type.is_pyobject:
+ #print "NameNode.generate_assignment_code: to", self.name ###
+ #print "...from", rhs ###
+ #print "...LHS type", self.type, "ctype", self.ctype() ###
+ #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
+ if self.use_managed_ref:
+ rhs.make_owned_reference(code)
+ is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
+ if is_external_ref:
+ if not self.cf_is_null:
+ if self.cf_maybe_null:
+ code.put_xgotref(self.py_result())
+ else:
+ code.put_gotref(self.py_result())
+ assigned = True
+ if entry.is_cglobal:
+ code.put_decref_set(
+ self.result(), rhs.result_as(self.ctype()))
+ else:
+ if not self.cf_is_null:
+ if self.cf_maybe_null:
+ code.put_xdecref_set(
+ self.result(), rhs.result_as(self.ctype()))
+ else:
+ code.put_decref_set(
+ self.result(), rhs.result_as(self.ctype()))
+ else:
+ assigned = False
+ if is_external_ref:
+ code.put_giveref(rhs.py_result())
+ if not self.type.is_memoryviewslice:
+ if not assigned:
+ if overloaded_assignment:
+ result = rhs.result()
+ if exception_check == '+':
+ translate_cpp_exception(
+ code, self.pos,
+ '%s = %s;' % (self.result(), result),
+ self.result() if self.type.is_pyobject else None,
+ exception_value, self.in_nogil_context)
+ else:
+ code.putln('%s = %s;' % (self.result(), result))
+ else:
+ result = rhs.result_as(self.ctype())
+
+ if is_pythran_expr(self.type):
+ code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
+ elif result != self.result():
+ code.putln('%s = %s;' % (self.result(), result))
+ if debug_disposal_code:
+ print("NameNode.generate_assignment_code:")
+ print("...generating post-assignment code for %s" % rhs)
+ rhs.generate_post_assignment_code(code)
+ elif rhs.result_in_temp():
+ rhs.generate_post_assignment_code(code)
+
+ rhs.free_temps(code)
+
+ def generate_acquire_memoryviewslice(self, rhs, code):
+ """
+        Slices, coercions from objects, return values etc. are new references.
+        We have a borrowed reference in the case of dst = src.
+ """
+ from . import MemoryView
+
+ MemoryView.put_acquire_memoryviewslice(
+ lhs_cname=self.result(),
+ lhs_type=self.type,
+ lhs_pos=self.pos,
+ rhs=rhs,
+ code=code,
+ have_gil=not self.in_nogil_context,
+ first_assignment=self.cf_is_null)
+
+ def generate_acquire_buffer(self, rhs, code):
+ # rhstmp is only used in case the rhs is a complicated expression leading to
+ # the object, to avoid repeating the same C expression for every reference
+ # to the rhs. It does NOT hold a reference.
+ pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
+ if pretty_rhs:
+ rhstmp = rhs.result_as(self.ctype())
+ else:
+ rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
+ code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
+
+ from . import Buffer
+ Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
+ is_initialized=not self.lhs_of_first_assignment,
+ pos=self.pos, code=code)
+
+ if not pretty_rhs:
+ code.putln("%s = 0;" % rhstmp)
+ code.funcstate.release_temp(rhstmp)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ if self.entry is None:
+ return # There was an error earlier
+ elif self.entry.is_pyclass_attr:
+ namespace = self.entry.scope.namespace_cname
+ interned_cname = code.intern_identifier(self.entry.name)
+ if ignore_nonexisting:
+ key_error_code = 'PyErr_Clear(); else'
+ else:
+ # minor hack: fake a NameError on KeyError
+ key_error_code = (
+ '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
+ self.entry.name)
+ code.putln(
+ 'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
+ ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
+ ' %s '
+ '}' % (namespace, interned_cname,
+ key_error_code,
+ code.error_goto(self.pos)))
+ elif self.entry.is_pyglobal:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ interned_cname = code.intern_identifier(self.entry.name)
+ del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
+ Naming.module_cname, interned_cname)
+ if ignore_nonexisting:
+ code.putln(
+ 'if (unlikely(%s < 0)) {'
+ ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
+ '}' % (del_code, code.error_goto(self.pos)))
+ else:
+ code.put_error_if_neg(self.pos, del_code)
+ elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
+ if not self.cf_is_null:
+ if self.cf_maybe_null and not ignore_nonexisting:
+ code.put_error_if_unbound(self.pos, self.entry)
+
+ if self.entry.type.is_pyobject:
+ if self.entry.in_closure:
+ # generator
+ if ignore_nonexisting and self.cf_maybe_null:
+ code.put_xgotref(self.result())
+ else:
+ code.put_gotref(self.result())
+ if ignore_nonexisting and self.cf_maybe_null:
+ code.put_xdecref(self.result(), self.ctype())
+ else:
+ code.put_decref(self.result(), self.ctype())
+ code.putln('%s = NULL;' % self.result())
+ else:
+ code.put_xdecref_memoryviewslice(self.entry.cname,
+ have_gil=not self.nogil)
+ else:
+ error(self.pos, "Deletion of C names not supported")
+
+ def annotate(self, code):
+ if hasattr(self, 'is_called') and self.is_called:
+ pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
+ if self.type.is_pyobject:
+ style, text = 'py_call', 'python function (%s)'
+ else:
+ style, text = 'c_call', 'c function (%s)'
+ code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
+
+class BackquoteNode(ExprNode):
+ # `expr`
+ #
+ # arg ExprNode
+
+ type = py_object_type
+
+ subexprs = ['arg']
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_types(env)
+ self.arg = self.arg.coerce_to_pyobject(env)
+ self.is_temp = 1
+ return self
+
+ gil_message = "Backquote expression"
+
+ def calculate_constant_result(self):
+ self.constant_result = repr(self.arg.constant_result)
+
+ def generate_result_code(self, code):
+ code.putln(
+ "%s = PyObject_Repr(%s); %s" % (
+ self.result(),
+ self.arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class ImportNode(ExprNode):
+ # Used as part of import statement implementation.
+ # Implements result =
+ # __import__(module_name, globals(), None, name_list, level)
+ #
+ # module_name StringNode dotted name of module. Empty module
+ # name means importing the parent package according
+ # to level
+ # name_list ListNode or None list of names to be imported
+ # level int relative import level:
+ # -1: attempt both relative import and absolute import;
+ # 0: absolute import;
+ # >0: the number of parent directories to search
+ # relative to the current module.
+ # None: decide the level according to language level and
+ # directives
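+    #
+    #  e.g. 'from . import x' passes an empty module name, name_list ['x']
+    #  and level 1, while a plain 'import a.b' passes name_list None and
+    #  level 0 (or -1 under Py2 import semantics).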
+
+ type = py_object_type
+
+ subexprs = ['module_name', 'name_list']
+
+ def analyse_types(self, env):
+ if self.level is None:
+ if (env.directives['py2_import'] or
+ Future.absolute_import not in env.global_scope().context.future_directives):
+ self.level = -1
+ else:
+ self.level = 0
+ module_name = self.module_name.analyse_types(env)
+ self.module_name = module_name.coerce_to_pyobject(env)
+ if self.name_list:
+ name_list = self.name_list.analyse_types(env)
+ self.name_list = name_list.coerce_to_pyobject(env)
+ self.is_temp = 1
+ return self
+
+ gil_message = "Python import"
+
+ def generate_result_code(self, code):
+ if self.name_list:
+ name_list_code = self.name_list.py_result()
+ else:
+ name_list_code = "0"
+
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
+ import_code = "__Pyx_Import(%s, %s, %d)" % (
+ self.module_name.py_result(),
+ name_list_code,
+ self.level)
+
+ if (self.level <= 0 and
+ self.module_name.is_string_literal and
+ self.module_name.value in utility_code_for_imports):
+ helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value]
+ code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
+ import_code = '%s(%s)' % (helper_func, import_code)
+
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ import_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class IteratorNode(ExprNode):
+ # Used as part of for statement implementation.
+ #
+ # Implements result = iter(sequence)
+ #
+ # sequence ExprNode
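+    #
+    # For plain list/tuple sequences the generated C code bypasses the
+    # iterator protocol and indexes the object with a Py_ssize_t counter;
+    # all other Python objects go through PyObject_GetIter()/tp_iternext
+    # (see generate_result_code() below).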
+
+ type = py_object_type
+ iter_func_ptr = None
+ counter_cname = None
+ cpp_iterator_cname = None
+ reversed = False # currently only used for list/tuple types (see Optimize.py)
+ is_async = False
+
+ subexprs = ['sequence']
+
+ def analyse_types(self, env):
+ self.sequence = self.sequence.analyse_types(env)
+ if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
+ not self.sequence.type.is_string:
+ # C array iteration will be transformed later on
+ self.type = self.sequence.type
+ elif self.sequence.type.is_cpp_class:
+ self.analyse_cpp_types(env)
+ else:
+ self.sequence = self.sequence.coerce_to_pyobject(env)
+ if self.sequence.type in (list_type, tuple_type):
+ self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
+ self.is_temp = 1
+ return self
+
+ gil_message = "Iterating over Python object"
+
+ _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
+ ]))
+
+ def type_dependencies(self, env):
+ return self.sequence.type_dependencies(env)
+
+ def infer_type(self, env):
+ sequence_type = self.sequence.infer_type(env)
+ if sequence_type.is_array or sequence_type.is_ptr:
+ return sequence_type
+ elif sequence_type.is_cpp_class:
+ begin = sequence_type.scope.lookup("begin")
+ if begin is not None:
+ return begin.type.return_type
+ elif sequence_type.is_pyobject:
+ return sequence_type
+ return py_object_type
+
+ def analyse_cpp_types(self, env):
+ sequence_type = self.sequence.type
+ if sequence_type.is_ptr:
+ sequence_type = sequence_type.base_type
+ begin = sequence_type.scope.lookup("begin")
+ end = sequence_type.scope.lookup("end")
+ if (begin is None
+ or not begin.type.is_cfunction
+ or begin.type.args):
+ error(self.pos, "missing begin() on %s" % self.sequence.type)
+ self.type = error_type
+ return
+ if (end is None
+ or not end.type.is_cfunction
+ or end.type.args):
+ error(self.pos, "missing end() on %s" % self.sequence.type)
+ self.type = error_type
+ return
+ iter_type = begin.type.return_type
+ if iter_type.is_cpp_class:
+ if env.lookup_operator_for_types(
+ self.pos,
+ "!=",
+ [iter_type, end.type.return_type]) is None:
+ error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
+ self.type = error_type
+ return
+ if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
+ error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
+ self.type = error_type
+ return
+ if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
+ error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
+ self.type = error_type
+ return
+ self.type = iter_type
+ elif iter_type.is_ptr:
+ if not (iter_type == end.type.return_type):
+ error(self.pos, "incompatible types for begin() and end()")
+ self.type = iter_type
+ else:
+ error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
+ self.type = error_type
+ return
+
+ def generate_result_code(self, code):
+ sequence_type = self.sequence.type
+ if sequence_type.is_cpp_class:
+ if self.sequence.is_name:
+ # safe: C++ won't allow you to reassign to class references
+ begin_func = "%s.begin" % self.sequence.result()
+ else:
+ sequence_type = PyrexTypes.c_ptr_type(sequence_type)
+ self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
+ code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
+ begin_func = "%s->begin" % self.cpp_iterator_cname
+ # TODO: Limit scope.
+ code.putln("%s = %s();" % (self.result(), begin_func))
+ return
+ if sequence_type.is_array or sequence_type.is_ptr:
+ raise InternalError("for in carray slice not transformed")
+
+ is_builtin_sequence = sequence_type in (list_type, tuple_type)
+ if not is_builtin_sequence:
+ # reversed() not currently optimised (see Optimize.py)
+ assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
+ self.may_be_a_sequence = not sequence_type.is_builtin_type
+ if self.may_be_a_sequence:
+ code.putln(
+ "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
+ self.sequence.py_result(),
+ self.sequence.py_result()))
+
+ if is_builtin_sequence or self.may_be_a_sequence:
+ self.counter_cname = code.funcstate.allocate_temp(
+ PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ if self.reversed:
+ if sequence_type is list_type:
+ init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
+ else:
+ init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
+ else:
+ init_value = '0'
+ code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
+ self.result(),
+ self.sequence.py_result(),
+ self.result(),
+ self.counter_cname,
+ init_value))
+ if not is_builtin_sequence:
+ self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
+ if self.may_be_a_sequence:
+ code.putln("%s = NULL;" % self.iter_func_ptr)
+ code.putln("} else {")
+ code.put("%s = -1; " % self.counter_cname)
+
+ code.putln("%s = PyObject_GetIter(%s); %s" % (
+ self.result(),
+ self.sequence.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
+ # makes it visible to the C compiler that the pointer really isn't NULL, so that
+ # it can distinguish between the special cases and the generic case
+ code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
+ self.iter_func_ptr, self.py_result(),
+ code.error_goto_if_null(self.iter_func_ptr, self.pos)))
+ if self.may_be_a_sequence:
+ code.putln("}")
+
+ def generate_next_sequence_item(self, test_name, result_name, code):
+ assert self.counter_cname, "internal error: counter_cname temp not prepared"
+ final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
+ if self.sequence.is_sequence_constructor:
+ item_count = len(self.sequence.args)
+ if self.sequence.mult_factor is None:
+ final_size = item_count
+ elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
+ final_size = item_count * self.sequence.mult_factor.constant_result
+ code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
+ if self.reversed:
+ inc_dec = '--'
+ else:
+ inc_dec = '++'
+ code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
+ code.putln(
+ "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
+ result_name,
+ test_name,
+ self.py_result(),
+ self.counter_cname,
+ result_name,
+ self.counter_cname,
+ inc_dec,
+ # use the error label to avoid C compiler warnings if we only use it below
+ code.error_goto_if_neg('0', self.pos)
+ ))
+ code.putln("#else")
+ code.putln(
+ "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
+ result_name,
+ self.py_result(),
+ self.counter_cname,
+ self.counter_cname,
+ inc_dec,
+ code.error_goto_if_null(result_name, self.pos)))
+ code.put_gotref(result_name)
+ code.putln("#endif")
+
+ def generate_iter_next_result_code(self, result_name, code):
+ sequence_type = self.sequence.type
+ if self.reversed:
+ code.putln("if (%s < 0) break;" % self.counter_cname)
+ if sequence_type.is_cpp_class:
+ if self.cpp_iterator_cname:
+ end_func = "%s->end" % self.cpp_iterator_cname
+ else:
+ end_func = "%s.end" % self.sequence.result()
+ # TODO: Cache end() call?
+ code.putln("if (!(%s != %s())) break;" % (
+ self.result(),
+ end_func))
+ code.putln("%s = *%s;" % (
+ result_name,
+ self.result()))
+ code.putln("++%s;" % self.result())
+ return
+ elif sequence_type is list_type:
+ self.generate_next_sequence_item('List', result_name, code)
+ return
+ elif sequence_type is tuple_type:
+ self.generate_next_sequence_item('Tuple', result_name, code)
+ return
+
+ if self.may_be_a_sequence:
+ code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
+ code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
+ self.generate_next_sequence_item('List', result_name, code)
+ code.putln("} else {")
+ self.generate_next_sequence_item('Tuple', result_name, code)
+ code.putln("}")
+ code.put("} else ")
+
+ code.putln("{")
+ code.putln(
+ "%s = %s(%s);" % (
+ result_name,
+ self.iter_func_ptr,
+ self.py_result()))
+ code.putln("if (unlikely(!%s)) {" % result_name)
+ code.putln("PyObject* exc_type = PyErr_Occurred();")
+ code.putln("if (exc_type) {")
+ code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
+ code.putln("else %s" % code.error_goto(self.pos))
+ code.putln("}")
+ code.putln("break;")
+ code.putln("}")
+ code.put_gotref(result_name)
+ code.putln("}")
+
+ def free_temps(self, code):
+ if self.counter_cname:
+ code.funcstate.release_temp(self.counter_cname)
+ if self.iter_func_ptr:
+ code.funcstate.release_temp(self.iter_func_ptr)
+ self.iter_func_ptr = None
+ if self.cpp_iterator_cname:
+ code.funcstate.release_temp(self.cpp_iterator_cname)
+ ExprNode.free_temps(self, code)
+
+
+class NextNode(AtomicExprNode):
+ # Used as part of for statement implementation.
+ # Implements result = next(iterator)
+ # Created during analyse_types phase.
+ # The iterator is not owned by this node.
+ #
+ # iterator IteratorNode
+
+ def __init__(self, iterator):
+ AtomicExprNode.__init__(self, iterator.pos)
+ self.iterator = iterator
+
+ def nogil_check(self, env):
+ # ignore - errors (if any) are already handled by IteratorNode
+ pass
+
+ def type_dependencies(self, env):
+ return self.iterator.type_dependencies(env)
+
+ def infer_type(self, env, iterator_type=None):
+ if iterator_type is None:
+ iterator_type = self.iterator.infer_type(env)
+ if iterator_type.is_ptr or iterator_type.is_array:
+ return iterator_type.base_type
+ elif self.iterator.sequence.type is bytearray_type:
+ # This is a temporary work-around to fix bytearray iteration in 0.29.x
+ # It has been fixed properly in master, refer to ticket: 3473
+ return py_object_type
+ elif iterator_type.is_cpp_class:
+ item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
+ if item_type.is_reference:
+ item_type = item_type.ref_base_type
+ if item_type.is_const:
+ item_type = item_type.const_base_type
+ return item_type
+ else:
+ # Avoid duplication of complicated logic.
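+            # the element type of sequence[PY_SSIZE_T_MAX] is the same as
+            # the type produced by iterating over the sequence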
+ fake_index_node = IndexNode(
+ self.pos,
+ base=self.iterator.sequence,
+ index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type))
+ return fake_index_node.infer_type(env)
+
+ def analyse_types(self, env):
+ self.type = self.infer_type(env, self.iterator.type)
+ self.is_temp = 1
+ return self
+
+ def generate_result_code(self, code):
+ self.iterator.generate_iter_next_result_code(self.result(), code)
+
+
+class AsyncIteratorNode(ExprNode):
+ # Used as part of 'async for' statement implementation.
+ #
+ # Implements result = sequence.__aiter__()
+ #
+ # sequence ExprNode
+
+ subexprs = ['sequence']
+
+ is_async = True
+ type = py_object_type
+ is_temp = 1
+
+ def infer_type(self, env):
+ return py_object_type
+
+ def analyse_types(self, env):
+ self.sequence = self.sequence.analyse_types(env)
+ if not self.sequence.type.is_pyobject:
+ error(self.pos, "async for loops not allowed on C/C++ types")
+ self.sequence = self.sequence.coerce_to_pyobject(env)
+ return self
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
+ code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
+ self.result(),
+ self.sequence.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+
+class AsyncNextNode(AtomicExprNode):
+ # Used as part of 'async for' statement implementation.
+ # Implements result = iterator.__anext__()
+ # Created during analyse_types phase.
+ # The iterator is not owned by this node.
+ #
+ # iterator IteratorNode
+
+ type = py_object_type
+ is_temp = 1
+
+ def __init__(self, iterator):
+ AtomicExprNode.__init__(self, iterator.pos)
+ self.iterator = iterator
+
+ def infer_type(self, env):
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
+ code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
+ self.result(),
+ self.iterator.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+
+class WithExitCallNode(ExprNode):
+ # The __exit__() call of a 'with' statement. Used in both the
+ # except and finally clauses.
+
+ # with_stat WithStatNode the surrounding 'with' statement
+ # args TupleNode or ResultStatNode the exception info tuple
+ # await_expr AwaitExprNode the await expression of an 'async with' statement
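+    #
+    # On normal exit the args tuple is (None, None, None); when an exception
+    # propagates from the 'with' body it holds (exc_type, exc_value, tb), and
+    # a true return value from __exit__() suppresses the exception.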
+
+ subexprs = ['args', 'await_expr']
+ test_if_run = True
+ await_expr = None
+
+ def analyse_types(self, env):
+ self.args = self.args.analyse_types(env)
+ if self.await_expr:
+ self.await_expr = self.await_expr.analyse_types(env)
+ self.type = PyrexTypes.c_bint_type
+ self.is_temp = True
+ return self
+
+ def generate_evaluation_code(self, code):
+ if self.test_if_run:
+ # call only if it was not already called (and decref-cleared)
+ code.putln("if (%s) {" % self.with_stat.exit_var)
+
+ self.args.generate_evaluation_code(code)
+ result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+
+ code.mark_pos(self.pos)
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
+ result_var,
+ self.with_stat.exit_var,
+ self.args.result()))
+ code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
+ self.args.generate_disposal_code(code)
+ self.args.free_temps(code)
+
+ code.putln(code.error_goto_if_null(result_var, self.pos))
+ code.put_gotref(result_var)
+
+ if self.await_expr:
+ # FIXME: result_var temp currently leaks into the closure
+ self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
+ code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
+ self.await_expr.generate_post_assignment_code(code)
+ self.await_expr.free_temps(code)
+
+ if self.result_is_used:
+ self.allocate_temp_result(code)
+ code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
+ code.put_decref_clear(result_var, type=py_object_type)
+ if self.result_is_used:
+ code.put_error_if_neg(self.pos, self.result())
+ code.funcstate.release_temp(result_var)
+ if self.test_if_run:
+ code.putln("}")
+
+
+class ExcValueNode(AtomicExprNode):
+ # Node created during analyse_types phase
+ # of an ExceptClauseNode to fetch the current
+ # exception value.
+
+ type = py_object_type
+
+ def __init__(self, pos):
+ ExprNode.__init__(self, pos)
+
+ def set_var(self, var):
+ self.var = var
+
+ def calculate_result_code(self):
+ return self.var
+
+ def generate_result_code(self, code):
+ pass
+
+ def analyse_types(self, env):
+ return self
+
+
+class TempNode(ExprNode):
+ # Node created during analyse_types phase
+ # of some nodes to hold a temporary value.
+ #
+ # Note: One must call "allocate" and "release" on
+ # the node during code generation to get/release the temp.
+ # This is because the temp result is often used outside of
+ # the regular cycle.
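+    #
+    # A minimal usage sketch during code generation:
+    #     tmp = TempNode(pos, some_type)
+    #     tmp.allocate(code)      # before first use
+    #     ... tmp.result() ...    # the temp's cname
+    #     tmp.release(code)       # when done with it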
+
+ subexprs = []
+
+ def __init__(self, pos, type, env=None):
+ ExprNode.__init__(self, pos)
+ self.type = type
+ if type.is_pyobject:
+ self.result_ctype = py_object_type
+ self.is_temp = 1
+
+ def analyse_types(self, env):
+ return self
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def generate_result_code(self, code):
+ pass
+
+ def allocate(self, code):
+ self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
+
+ def release(self, code):
+ code.funcstate.release_temp(self.temp_cname)
+ self.temp_cname = None
+
+ def result(self):
+ try:
+ return self.temp_cname
+        except AttributeError:
+ assert False, "Remember to call allocate/release on TempNode"
+ raise
+
+ # Do not participate in normal temp alloc/dealloc:
+ def allocate_temp_result(self, code):
+ pass
+
+ def release_temp_result(self, code):
+ pass
+
+class PyTempNode(TempNode):
+ # TempNode holding a Python value.
+
+ def __init__(self, pos, env):
+ TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
+
+class RawCNameExprNode(ExprNode):
+ subexprs = []
+
+ def __init__(self, pos, type=None, cname=None):
+ ExprNode.__init__(self, pos, type=type)
+ if cname is not None:
+ self.cname = cname
+
+ def analyse_types(self, env):
+ return self
+
+ def set_cname(self, cname):
+ self.cname = cname
+
+ def result(self):
+ return self.cname
+
+ def generate_result_code(self, code):
+ pass
+
+
+#-------------------------------------------------------------------
+#
+# F-strings
+#
+#-------------------------------------------------------------------
+
+
+class JoinedStrNode(ExprNode):
+ # F-strings
+ #
+ # values [UnicodeNode|FormattedValueNode] Substrings of the f-string
+ #
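+    # e.g. f"x={x}!" parses into
+    #     values=[UnicodeNode('x='), FormattedValueNode(x), UnicodeNode('!')]
+    #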
+ type = unicode_type
+ is_temp = True
+
+ subexprs = ['values']
+
+ def analyse_types(self, env):
+ self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values]
+ return self
+
+ def may_be_none(self):
+ # PyUnicode_Join() always returns a Unicode string or raises an exception
+ return False
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ num_items = len(self.values)
+ list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
+
+ code.putln('%s = PyTuple_New(%s); %s' % (
+ list_var,
+ num_items,
+ code.error_goto_if_null(list_var, self.pos)))
+ code.put_gotref(list_var)
+ code.putln("%s = 0;" % ulength_var)
+ code.putln("%s = 127;" % max_char_var) # at least ASCII character range
+
+ for i, node in enumerate(self.values):
+ node.generate_evaluation_code(code)
+ node.make_owned_reference(code)
+
+ ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
+ max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
+ is_ascii = False
+ if isinstance(node, UnicodeNode):
+ try:
+ # most strings will be ASCII or at least Latin-1
+ node.value.encode('iso8859-1')
+ max_char_value = '255'
+ node.value.encode('us-ascii')
+ is_ascii = True
+ except UnicodeEncodeError:
+ if max_char_value != '255':
+ # not ISO8859-1 => check BMP limit
+ max_char = max(map(ord, node.value))
+ if max_char < 0xD800:
+ # BMP-only, no surrogate pairs used
+ max_char_value = '65535'
+ ulength = str(len(node.value))
+ elif max_char >= 65536:
+                            # clearly outside of BMP, and not on a 16-bit Unicode system
+ max_char_value = '1114111'
+ ulength = str(len(node.value))
+ else:
+ # not really worth implementing a check for surrogate pairs here
+ # drawback: C code can differ when generating on Py2 with 2-byte Unicode
+ pass
+ else:
+ ulength = str(len(node.value))
+ elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
+ is_ascii = True # formatted C numbers are always ASCII
+
+ if not is_ascii:
+ code.putln("%s = (%s > %s) ? %s : %s;" % (
+ max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
+ code.putln("%s += %s;" % (ulength_var, ulength))
+
+ code.put_giveref(node.py_result())
+ code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
+ node.generate_post_assignment_code(code)
+ node.free_temps(code)
+
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+ code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
+ code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % (
+ self.result(),
+ list_var,
+ num_items,
+ ulength_var,
+ max_char_var,
+ code.error_goto_if_null(self.py_result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ code.put_decref_clear(list_var, py_object_type)
+ code.funcstate.release_temp(list_var)
+ code.funcstate.release_temp(ulength_var)
+ code.funcstate.release_temp(max_char_var)
+
+
+class FormattedValueNode(ExprNode):
+ # {}-delimited portions of an f-string
+ #
+ # value ExprNode The expression itself
+ # conversion_char str or None Type conversion (!s, !r, !a, or none, or 'd' for integer conversion)
+ # format_spec JoinedStrNode or None Format string passed to __format__
+ # c_format_spec str or None If not None, formatting can be done at the C level
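+    #
+    # e.g. in f"{obj!r:>10}", conversion_char is 'r' and format_spec is the
+    # JoinedStrNode for '>10'.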
+
+ subexprs = ['value', 'format_spec']
+
+ type = unicode_type
+ is_temp = True
+ c_format_spec = None
+
+ find_conversion_func = {
+ 's': 'PyObject_Unicode',
+ 'r': 'PyObject_Repr',
+ 'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2
+ 'd': '__Pyx_PyNumber_IntOrLong', # NOTE: internal mapping for '%d' formatting
+ }.get
+
+ def may_be_none(self):
+ # PyObject_Format() always returns a Unicode string or raises an exception
+ return False
+
+ def analyse_types(self, env):
+ self.value = self.value.analyse_types(env)
+ if not self.format_spec or self.format_spec.is_string_literal:
+ c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec
+ if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
+ self.c_format_spec = c_format_spec
+
+ if self.format_spec:
+ self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
+ if self.c_format_spec is None:
+ self.value = self.value.coerce_to_pyobject(env)
+ if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
+ if self.value.type is unicode_type and not self.value.may_be_none():
+ # value is definitely a unicode string and we don't apply any special formatting
+ return self.value
+ return self
+
+ def generate_result_code(self, code):
+ if self.c_format_spec is not None and not self.value.type.is_pyobject:
+ convert_func_call = self.value.type.convert_to_pystring(
+ self.value.result(), code, self.c_format_spec)
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ convert_func_call,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ return
+
+ value_result = self.value.py_result()
+ value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
+ if self.format_spec:
+ format_func = '__Pyx_PyObject_Format'
+ format_spec = self.format_spec.py_result()
+ else:
+ # common case: expect simple Unicode pass-through if no format spec
+ format_func = '__Pyx_PyObject_FormatSimple'
+ # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
+ format_spec = Naming.empty_unicode
+
+ conversion_char = self.conversion_char
+ if conversion_char == 's' and value_is_unicode:
+ # no need to pipe unicode strings through str()
+ conversion_char = None
+
+ if conversion_char:
+ fn = self.find_conversion_func(conversion_char)
+ assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
+ value_result = '%s(%s)' % (fn, value_result)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
+ format_func += 'AndDecref'
+ elif self.format_spec:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
+
+ code.putln("%s = %s(%s, %s); %s" % (
+ self.result(),
+ format_func,
+ value_result,
+ format_spec,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+#-------------------------------------------------------------------
+#
+# Parallel nodes (cython.parallel.thread(savailable|id))
+#
+#-------------------------------------------------------------------
+
+class ParallelThreadsAvailableNode(AtomicExprNode):
+ """
+ Note: this is disabled and not a valid directive at this moment
+
+ Implements cython.parallel.threadsavailable(). If we are called from the
+ sequential part of the application, we need to call omp_get_max_threads(),
+ and in the parallel part we can just call omp_get_num_threads()
+ """
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_types(self, env):
+ self.is_temp = True
+ # env.add_include_file("omp.h")
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("#ifdef _OPENMP")
+ code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
+ self.temp_code)
+ code.putln("else %s = omp_get_num_threads();" % self.temp_code)
+ code.putln("#else")
+ code.putln("%s = 1;" % self.temp_code)
+ code.putln("#endif")
+
+ def result(self):
+ return self.temp_code
+
+
+class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
+ """
+ Implements cython.parallel.threadid()
+ """
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_types(self, env):
+ self.is_temp = True
+ # env.add_include_file("omp.h")
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("#ifdef _OPENMP")
+ code.putln("%s = omp_get_thread_num();" % self.temp_code)
+ code.putln("#else")
+ code.putln("%s = 0;" % self.temp_code)
+ code.putln("#endif")
+
+ def result(self):
+ return self.temp_code
+
+
+#-------------------------------------------------------------------
+#
+# Trailer nodes
+#
+#-------------------------------------------------------------------
+
+
+class _IndexingBaseNode(ExprNode):
+ # Base class for indexing nodes.
+ #
+ # base ExprNode the value being indexed
+
+ def is_ephemeral(self):
+ # in most cases, indexing will return a safe reference to an object in a container,
+ # so we consider the result safe if the base object is
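+ # string types are the exception: indexing them may yield a value that
+ # points into the object's internal buffer, so the result is only safe
+ # while the base string is kept alive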
+ return self.base.is_ephemeral() or self.base.type in (
+ basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
+
+ def check_const_addr(self):
+ return self.base.check_const_addr() and self.index.check_const()
+
+ def is_lvalue(self):
+ # NOTE: references currently have both is_reference and is_ptr
+ # set. Since pointers and references have different lvalue
+ # rules, we must be careful to separate the two.
+ if self.type.is_reference:
+ if self.type.ref_base_type.is_array:
+ # fixed-sized arrays aren't l-values
+ return False
+ elif self.type.is_ptr:
+ # non-const pointers can always be reassigned
+ return True
+ # Just about everything else returned by the index operator
+ # can be an lvalue.
+ return True
+
+
+class IndexNode(_IndexingBaseNode):
+ # Sequence indexing.
+ #
+ # base ExprNode
+ # index ExprNode
+ # type_indices [PyrexType]
+ #
+ # is_fused_index boolean Whether the index is used to specialize a
+ # c(p)def function
+
+ subexprs = ['base', 'index']
+ type_indices = None
+
+ is_subscript = True
+ is_fused_index = False
+
+ def calculate_constant_result(self):
+ self.constant_result = self.base.constant_result[self.index.constant_result]
+
+ def compile_time_value(self, denv):
+ base = self.base.compile_time_value(denv)
+ index = self.index.compile_time_value(denv)
+ try:
+ return base[index]
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def is_simple(self):
+ base = self.base
+ return (base.is_simple() and self.index.is_simple()
+ and base.type and (base.type.is_ptr or base.type.is_array))
+
+ def may_be_none(self):
+ base_type = self.base.type
+ if base_type:
+ if base_type.is_string:
+ return False
+ if isinstance(self.index, SliceNode):
+ # slicing!
+ if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def analyse_as_type(self, env):
+ base_type = self.base.analyse_as_type(env)
+ if base_type and not base_type.is_pyobject:
+ if base_type.is_cpp_class:
+ if isinstance(self.index, TupleNode):
+ template_values = self.index.args
+ else:
+ template_values = [self.index]
+ type_node = Nodes.TemplatedTypeNode(
+ pos=self.pos,
+ positional_args=template_values,
+ keyword_args=None)
+ return type_node.analyse(env, base_type=base_type)
+ elif self.index.is_slice or self.index.is_sequence_constructor:
+ # memory view
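+ # e.g. (illustrative) "double[:, ::1]" analyses to a 2D
+ # MemoryViewSliceType with a contiguous last axis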
+ from . import MemoryView
+ env.use_utility_code(MemoryView.view_utility_code)
+ axes = [self.index] if self.index.is_slice else list(self.index.args)
+ return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes))
+ else:
+ # C array
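+ # e.g. (illustrative) "int[4]" analyses to CArrayType(int, 4);
+ # non-constant sizes are rejected below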
+ index = self.index.compile_time_value(env)
+ if index is not None:
+ try:
+ index = int(index)
+ except (ValueError, TypeError):
+ pass
+ else:
+ return PyrexTypes.CArrayType(base_type, index)
+ error(self.pos, "Array size must be a compile time constant")
+ return None
+
+ def type_dependencies(self, env):
+ return self.base.type_dependencies(env) + self.index.type_dependencies(env)
+
+ def infer_type(self, env):
+ base_type = self.base.infer_type(env)
+ if self.index.is_slice:
+ # slicing!
+ if base_type.is_string:
+ # sliced C strings must coerce to Python
+ return bytes_type
+ elif base_type.is_pyunicode_ptr:
+ # sliced Py_UNICODE* strings must coerce to Python
+ return unicode_type
+ elif base_type in (unicode_type, bytes_type, str_type,
+ bytearray_type, list_type, tuple_type):
+ # slicing these returns the same type
+ return base_type
+ else:
+ # TODO: Handle buffers (hopefully without too much redundancy).
+ return py_object_type
+
+ index_type = self.index.infer_type(env)
+ if index_type and index_type.is_int or isinstance(self.index, IntNode):
+ # indexing!
+ if base_type is unicode_type:
+ # Py_UCS4 will automatically coerce to a unicode string
+ # if required, so this is safe. We only infer Py_UCS4
+ # when the index is a C integer type. Otherwise, we may
+ # need to use normal Python item access, in which case
+ # it's faster to return the one-char unicode string than
+ # to receive it, throw it away, and potentially rebuild it
+ # on a subsequent PyObject coercion.
+ return PyrexTypes.c_py_ucs4_type
+ elif base_type is str_type:
+ # always returns str - Py2: bytes, Py3: unicode
+ return base_type
+ elif base_type is bytearray_type:
+ return PyrexTypes.c_uchar_type
+ elif isinstance(self.base, BytesNode):
+ #if env.global_scope().context.language_level >= 3:
+ # # inferring 'char' can be made to work in Python 3 mode
+ # return PyrexTypes.c_char_type
+ # Py2/3 return different types on indexing bytes objects
+ return py_object_type
+ elif base_type in (tuple_type, list_type):
+ # if base is a literal, take a look at its values
+ item_type = infer_sequence_item_type(
+ env, self.base, self.index, seq_type=base_type)
+ if item_type is not None:
+ return item_type
+ elif base_type.is_ptr or base_type.is_array:
+ return base_type.base_type
+ elif base_type.is_ctuple and isinstance(self.index, IntNode):
+ if self.index.has_constant_result():
+ index = self.index.constant_result
+ if index < 0:
+ index += base_type.size
+ if 0 <= index < base_type.size:
+ return base_type.components[index]
+
+ if base_type.is_cpp_class:
+ class FakeOperand:
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+ operands = [
+ FakeOperand(pos=self.pos, type=base_type),
+ FakeOperand(pos=self.pos, type=index_type),
+ ]
+ index_func = env.lookup_operator('[]', operands)
+ if index_func is not None:
+ return index_func.type.return_type
+
+ if is_pythran_expr(base_type) and is_pythran_expr(index_type):
+ index_with_type = (self.index, index_type)
+ return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
+
+ # may be slicing or indexing, we don't know
+ if base_type in (unicode_type, str_type):
+ # these types always return their own type on Python indexing/slicing
+ return base_type
+ else:
+ # TODO: Handle buffers (hopefully without too much redundancy).
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self.analyse_base_and_index_types(env, getting=True)
+
+ def analyse_target_types(self, env):
+ node = self.analyse_base_and_index_types(env, setting=True)
+ if node.type.is_const:
+ error(self.pos, "Assignment to const dereference")
+ if node is self and not node.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
+ return node
+
+ def analyse_base_and_index_types(self, env, getting=False, setting=False,
+ analyse_base=True):
+ # Note: This might be cleaned up by having IndexNode
+ # parsed in a saner way, constructing the tuple only if
+ # needed.
+ if analyse_base:
+ self.base = self.base.analyse_types(env)
+
+ if self.base.type.is_error:
+ # Do not visit child tree if base is undeclared to avoid confusing
+ # error messages
+ self.type = PyrexTypes.error_type
+ return self
+
+ is_slice = self.index.is_slice
+ if not env.directives['wraparound']:
+ if is_slice:
+ check_negative_indices(self.index.start, self.index.stop)
+ else:
+ check_negative_indices(self.index)
+
+ # Potentially overflowing index value.
+ if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
+ self.index = self.index.coerce_to_pyobject(env)
+
+ is_memslice = self.base.type.is_memoryviewslice
+ # Handle the case where base is a literal char* (and we expect a string, not an int)
+ if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
+ if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
+ self.base = self.base.coerce_to_pyobject(env)
+
+ replacement_node = self.analyse_as_buffer_operation(env, getting)
+ if replacement_node is not None:
+ return replacement_node
+
+ self.nogil = env.nogil
+ base_type = self.base.type
+
+ if not base_type.is_cfunction:
+ self.index = self.index.analyse_types(env)
+ self.original_index_type = self.index.type
+
+ if base_type.is_unicode_char:
+ # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
+ # cases, but indexing must still work for them
+ if setting:
+ warning(self.pos, "cannot assign to Unicode string index", level=1)
+ elif self.index.constant_result in (0, -1):
+ # uchar[0] => uchar
+ return self.base
+ self.base = self.base.coerce_to_pyobject(env)
+ base_type = self.base.type
+
+ if base_type.is_pyobject:
+ return self.analyse_as_pyobject(env, is_slice, getting, setting)
+ elif base_type.is_ptr or base_type.is_array:
+ return self.analyse_as_c_array(env, is_slice)
+ elif base_type.is_cpp_class:
+ return self.analyse_as_cpp(env, setting)
+ elif base_type.is_cfunction:
+ return self.analyse_as_c_function(env)
+ elif base_type.is_ctuple:
+ return self.analyse_as_c_tuple(env, getting, setting)
+ else:
+ error(self.pos,
+ "Attempting to index non-array type '%s'" %
+ base_type)
+ self.type = PyrexTypes.error_type
+ return self
+
+ def analyse_as_pyobject(self, env, is_slice, getting, setting):
+ base_type = self.base.type
+ if self.index.type.is_unicode_char and base_type is not dict_type:
+ # TODO: eventually fold into case below and remove warning, once people have adapted their code
+ warning(self.pos,
+ "Item lookup of unicode character codes now always converts to a Unicode string. "
+ "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
+ self.index = self.index.coerce_to_pyobject(env)
+ self.is_temp = 1
+ elif self.index.type.is_int and base_type is not dict_type:
+ if (getting
+ and (base_type in (list_type, tuple_type, bytearray_type))
+ and (not self.index.type.signed
+ or not env.directives['wraparound']
+ or (isinstance(self.index, IntNode) and
+ self.index.has_constant_result() and self.index.constant_result >= 0))
+ and not env.directives['boundscheck']):
+ self.is_temp = 0
+ else:
+ self.is_temp = 1
+ self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
+ self.original_index_type.create_to_py_utility_code(env)
+ else:
+ self.index = self.index.coerce_to_pyobject(env)
+ self.is_temp = 1
+
+ if self.index.type.is_int and base_type is unicode_type:
+ # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
+ # if required, so this is fast and safe
+ self.type = PyrexTypes.c_py_ucs4_type
+ elif self.index.type.is_int and base_type is bytearray_type:
+ if setting:
+ self.type = PyrexTypes.c_uchar_type
+ else:
+ # not using 'uchar' to enable fast and safe error reporting as '-1'
+ self.type = PyrexTypes.c_int_type
+ elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
+ self.type = base_type
+ else:
+ item_type = None
+ if base_type in (list_type, tuple_type) and self.index.type.is_int:
+ item_type = infer_sequence_item_type(
+ env, self.base, self.index, seq_type=base_type)
+ if item_type is None:
+ item_type = py_object_type
+ self.type = item_type
+ if base_type in (list_type, tuple_type, dict_type):
+ # do the None check explicitly (not in a helper) to allow optimising it away
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+
+ self.wrap_in_nonecheck_node(env, getting)
+ return self
+
+ def analyse_as_c_array(self, env, is_slice):
+ base_type = self.base.type
+ self.type = base_type.base_type
+ if is_slice:
+ self.type = base_type
+ elif self.index.type.is_pyobject:
+ self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+ elif not self.index.type.is_int:
+ error(self.pos, "Invalid index type '%s'" % self.index.type)
+ return self
+
+ def analyse_as_cpp(self, env, setting):
+ base_type = self.base.type
+ function = env.lookup_operator("[]", [self.base, self.index])
+ if function is None:
+ error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return self
+ func_type = function.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check:
+ if not setting:
+ self.is_temp = True
+ if self.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ self.index = self.index.coerce_to(func_type.args[0].type, env)
+ self.type = func_type.return_type
+ if setting and not func_type.return_type.is_reference:
+ error(self.pos, "Can't set non-reference result '%s'" % self.type)
+ return self
+
+ def analyse_as_c_function(self, env):
+ base_type = self.base.type
+ if base_type.is_fused:
+ self.parse_indexed_fused_cdef(env)
+ else:
+ self.type_indices = self.parse_index_as_types(env)
+ self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
+ if base_type.templates is None:
+ error(self.pos, "Can only parameterize template functions.")
+ self.type = error_type
+ elif self.type_indices is None:
+ # Error recorded earlier.
+ self.type = error_type
+ elif len(base_type.templates) != len(self.type_indices):
+ error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
+ (len(base_type.templates), len(self.type_indices))))
+ self.type = error_type
+ else:
+ self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
+ # FIXME: use a dedicated Node class instead of generic IndexNode
+ return self
+
+ def analyse_as_c_tuple(self, env, getting, setting):
+ base_type = self.base.type
+ if isinstance(self.index, IntNode) and self.index.has_constant_result():
+ index = self.index.constant_result
+ if -base_type.size <= index < base_type.size:
+ if index < 0:
+ index += base_type.size
+ self.type = base_type.components[index]
+ else:
+ error(self.pos,
+ "Index %s out of bounds for '%s'" %
+ (index, base_type))
+ self.type = PyrexTypes.error_type
+ return self
+ else:
+ self.base = self.base.coerce_to_pyobject(env)
+ return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
+
+ def analyse_as_buffer_operation(self, env, getting):
+ """
+ Analyse buffer indexing and memoryview indexing/slicing
+ """
+ if isinstance(self.index, TupleNode):
+ indices = self.index.args
+ else:
+ indices = [self.index]
+
+ base = self.base
+ base_type = base.type
+ replacement_node = None
+ if base_type.is_memoryviewslice:
+ # memoryviewslice indexing or slicing
+ from . import MemoryView
+ if base.is_memview_slice:
+ # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
+ merged_indices = base.merged_indices(indices)
+ if merged_indices is not None:
+ base = base.base
+ base_type = base.type
+ indices = merged_indices
+ have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
+ if have_slices:
+ replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
+ else:
+ replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
+ elif base_type.is_buffer or base_type.is_pythran_expr:
+ if base_type.is_pythran_expr or len(indices) == base_type.ndim:
+ # Buffer indexing
+ is_buffer_access = True
+ indices = [index.analyse_types(env) for index in indices]
+ if base_type.is_pythran_expr:
+ do_replacement = all(
+ index.type.is_int or index.is_slice or index.type.is_pythran_expr
+ for index in indices)
+ if do_replacement:
+ for i,index in enumerate(indices):
+ if index.is_slice:
+ index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
+ index = index.analyse_types(env)
+ indices[i] = index
+ else:
+ do_replacement = all(index.type.is_int for index in indices)
+ if do_replacement:
+ replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
+ # On cloning, indices is cloned. Otherwise, unpack index into indices.
+ assert not isinstance(self.index, CloneNode)
+
+ if replacement_node is not None:
+ replacement_node = replacement_node.analyse_types(env, getting)
+ return replacement_node
+
+ def wrap_in_nonecheck_node(self, env, getting):
+ if not env.directives['nonecheck'] or not self.base.may_be_none():
+ return
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+
+ def parse_index_as_types(self, env, required=True):
+ if isinstance(self.index, TupleNode):
+ indices = self.index.args
+ else:
+ indices = [self.index]
+ type_indices = []
+ for index in indices:
+ type_indices.append(index.analyse_as_type(env))
+ if type_indices[-1] is None:
+ if required:
+ error(index.pos, "not parsable as a type")
+ return None
+ return type_indices
+
+ def parse_indexed_fused_cdef(self, env):
+ """
+ Interpret fused_cdef_func[specific_type1, ...]
+
+ Note that if this method is called, we are an indexed cdef function
+ with fused argument types, and this IndexNode will be replaced by the
+ NameNode with specific entry just after analysis of expressions by
+ AnalyseExpressionsTransform.
+ """
+ self.type = PyrexTypes.error_type
+
+ self.is_fused_index = True
+
+ base_type = self.base.type
+ positions = []
+
+ if self.index.is_name or self.index.is_attribute:
+ positions.append(self.index.pos)
+ elif isinstance(self.index, TupleNode):
+ for arg in self.index.args:
+ positions.append(arg.pos)
+ specific_types = self.parse_index_as_types(env, required=False)
+
+ if specific_types is None:
+ self.index = self.index.analyse_types(env)
+
+ if not self.base.entry.as_variable:
+ error(self.pos, "Can only index fused functions with types")
+ else:
+ # A cpdef function indexed with Python objects
+ self.base.entry = self.entry = self.base.entry.as_variable
+ self.base.type = self.type = self.entry.type
+
+ self.base.is_temp = True
+ self.is_temp = True
+
+ self.entry.used = True
+
+ self.is_fused_index = False
+ return
+
+ for i, type in enumerate(specific_types):
+ specific_types[i] = type.specialize_fused(env)
+
+ fused_types = base_type.get_fused_types()
+ if len(specific_types) > len(fused_types):
+ return error(self.pos, "Too many types specified")
+ elif len(specific_types) < len(fused_types):
+ t = fused_types[len(specific_types)]
+ return error(self.pos, "Not enough types specified to specialize "
+ "the function, %s is still fused" % t)
+
+ # See if our index types form valid specializations
+ for pos, specific_type, fused_type in zip(positions,
+ specific_types,
+ fused_types):
+ if not any([specific_type.same_as(t) for t in fused_type.types]):
+ return error(pos, "Type not in fused type")
+
+ if specific_type is None or specific_type.is_error:
+ return
+
+ fused_to_specific = dict(zip(fused_types, specific_types))
+ type = base_type.specialize(fused_to_specific)
+
+ if type.is_fused:
+ # Only partially specific, this is invalid
+ error(self.pos,
+ "Index operation makes function only partially specific")
+ else:
+ # Fully specific, find the signature with the specialized entry
+ for signature in self.base.type.get_all_specialized_function_types():
+ if type.same_as(signature):
+ self.type = signature
+
+ if self.base.is_attribute:
+ # Pretend to be a normal attribute, for cdef extension
+ # methods
+ self.entry = signature.entry
+ self.is_attribute = True
+ self.obj = self.base.obj
+
+ self.type.entry.used = True
+ self.base.type = signature
+ self.base.entry = signature.entry
+
+ break
+ else:
+ # This is a bug
+ raise InternalError("Couldn't find the right signature")
+
+ gil_message = "Indexing Python object"
+
+ def calculate_result_code(self):
+ if self.base.type in (list_type, tuple_type, bytearray_type):
+ if self.base.type is list_type:
+ index_code = "PyList_GET_ITEM(%s, %s)"
+ elif self.base.type is tuple_type:
+ index_code = "PyTuple_GET_ITEM(%s, %s)"
+ elif self.base.type is bytearray_type:
+ index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
+ else:
+ assert False, "unexpected base type in indexing: %s" % self.base.type
+ elif self.base.type.is_cfunction:
+ return "%s<%s>" % (
+ self.base.result(),
+ ",".join([param.empty_declaration_code() for param in self.type_indices]))
+ elif self.base.type.is_ctuple:
+ index = self.index.constant_result
+ if index < 0:
+ index += self.base.type.size
+ return "%s.f%s" % (self.base.result(), index)
+ else:
+ if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
+ error(self.pos, "Invalid use of pointer slice")
+ return
+ index_code = "(%s[%s])"
+ return index_code % (self.base.result(), self.index.result())
+
+ def extra_index_params(self, code):
+ if self.index.type.is_int:
+ is_list = self.base.type is list_type
+ wraparound = (
+ bool(code.globalstate.directives['wraparound']) and
+ self.original_index_type.signed and
+ not (isinstance(self.index.constant_result, _py_int_types)
+ and self.index.constant_result >= 0))
+ boundscheck = bool(code.globalstate.directives['boundscheck'])
+ return ", %s, %d, %s, %d, %d, %d" % (
+ self.original_index_type.empty_declaration_code(),
+ self.original_index_type.signed and 1 or 0,
+ self.original_index_type.to_py_function,
+ is_list, wraparound, boundscheck)
+ else:
+ return ""
+
+ def generate_result_code(self, code):
+ if not self.is_temp:
+ # all handled in self.calculate_result_code()
+ return
+
+ utility_code = None
+ if self.type.is_pyobject:
+ error_value = 'NULL'
+ if self.index.type.is_int:
+ if self.base.type is list_type:
+ function = "__Pyx_GetItemInt_List"
+ elif self.base.type is tuple_type:
+ function = "__Pyx_GetItemInt_Tuple"
+ else:
+ function = "__Pyx_GetItemInt"
+ utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
+ else:
+ if self.base.type is dict_type:
+ function = "__Pyx_PyDict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
+ # obj[str] is probably doing a dict lookup
+ function = "__Pyx_PyObject_Dict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ else:
+ function = "__Pyx_PyObject_GetItem"
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
+ elif self.type.is_unicode_char and self.base.type is unicode_type:
+ assert self.index.type.is_int
+ function = "__Pyx_GetItemInt_Unicode"
+ error_value = '(Py_UCS4)-1'
+ utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
+ elif self.base.type is bytearray_type:
+ assert self.index.type.is_int
+ assert self.type.is_int
+ function = "__Pyx_GetItemInt_ByteArray"
+ error_value = '-1'
+ utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
+ elif not (self.base.type.is_cpp_class and self.exception_check):
+ assert False, "unexpected type %s and base type %s for indexing" % (
+ self.type, self.base.type)
+
+ if utility_code is not None:
+ code.globalstate.use_utility_code(utility_code)
+
+ if self.index.type.is_int:
+ index_code = self.index.result()
+ else:
+ index_code = self.index.py_result()
+
+ if self.base.type.is_cpp_class and self.exception_check:
+ translate_cpp_exception(code, self.pos,
+ "%s = %s[%s];" % (self.result(), self.base.result(),
+ self.index.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
+ code.putln(
+ "%s = %s(%s, %s%s); %s" % (
+ self.result(),
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(code),
+ code.error_goto_if(error_check % self.result(), self.pos)))
+ if self.type.is_pyobject:
+ code.put_gotref(self.py_result())
+
+ def generate_setitem_code(self, value_code, code):
+ if self.index.type.is_int:
+ if self.base.type is bytearray_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
+ function = "__Pyx_SetItemInt_ByteArray"
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
+ function = "__Pyx_SetItemInt"
+ index_code = self.index.result()
+ else:
+ index_code = self.index.py_result()
+ if self.base.type is dict_type:
+ function = "PyDict_SetItem"
+ # It would seem that we could specialize lists/tuples, but that
+ # shouldn't happen here.
+ # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
+ # index instead of an object, and bad conversion here would give
+ # the wrong exception. Also, tuples are supposed to be immutable,
+ # and raise a TypeError when trying to set their entries
+ # (PyTuple_SetItem() is for creating new tuples from scratch).
+ else:
+ function = "PyObject_SetItem"
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s, %s%s)" % (
+ function,
+ self.base.py_result(),
+ index_code,
+ value_code,
+ self.extra_index_params(code)),
+ self.pos))
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.generate_subexpr_evaluation_code(code)
+
+ if self.type.is_pyobject:
+ self.generate_setitem_code(rhs.py_result(), code)
+ elif self.base.type is bytearray_type:
+ value_code = self._check_byte_value(code, rhs)
+ self.generate_setitem_code(value_code, code)
+ elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
+ if overloaded_assignment and exception_check and \
+ self.exception_value != exception_value:
+ # Handle the case that both the index operator and the assignment
+ # operator have a c++ exception handler and they are not the same.
+ translate_double_cpp_exception(code, self.pos, self.type,
+ self.result(), rhs.result(), self.exception_value,
+ exception_value, self.in_nogil_context)
+ else:
+ # Handle the case that only the index operator has a
+ # c++ exception handler, or that
+ # both exception handlers are the same.
+ translate_cpp_exception(code, self.pos,
+ "%s = %s;" % (self.result(), rhs.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln(
+ "%s = %s;" % (self.result(), rhs.result()))
+
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def _check_byte_value(self, code, rhs):
+ # TODO: should we do this generally on downcasts, or just here?
+ assert rhs.type.is_int, repr(rhs.type)
+ value_code = rhs.result()
+ if rhs.has_constant_result():
+ if 0 <= rhs.constant_result < 256:
+ return value_code
+ needs_cast = True # make at least the C compiler happy
+ warning(rhs.pos,
+ "value outside of range(0, 256)"
+ " when assigning to byte: %s" % rhs.constant_result,
+ level=1)
+ else:
+ needs_cast = rhs.type != PyrexTypes.c_uchar_type
+
+ if not self.nogil:
+ conditions = []
+ if rhs.is_literal or rhs.type.signed:
+ conditions.append('%s < 0' % value_code)
+ if (rhs.is_literal or not
+ (rhs.is_temp and rhs.type in (
+ PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
+ PyrexTypes.c_schar_type))):
+ conditions.append('%s > 255' % value_code)
+ if conditions:
+ code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
+ code.putln(
+ 'PyErr_SetString(PyExc_ValueError,'
+ ' "byte must be in range(0, 256)"); %s' %
+ code.error_goto(self.pos))
+ code.putln("}")
+
+ if needs_cast:
+ value_code = '((unsigned char)%s)' % value_code
+ return value_code
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ self.generate_subexpr_evaluation_code(code)
+ #if self.type.is_pyobject:
+ if self.index.type.is_int:
+ function = "__Pyx_DelItemInt"
+ index_code = self.index.result()
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
+ else:
+ index_code = self.index.py_result()
+ if self.base.type is dict_type:
+ function = "PyDict_DelItem"
+ else:
+ function = "PyObject_DelItem"
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s%s)" % (
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(code)),
+ self.pos))
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+
+class BufferIndexNode(_IndexingBaseNode):
+ """
+ Indexing of buffers and memoryviews. This node is created during type
+ analysis from IndexNode and replaces it.
+
+ Attributes:
+ base - base node being indexed
+ indices - list of indexing expressions
+ """
+
+ subexprs = ['base', 'indices']
+
+ is_buffer_access = True
+
+ # Whether we're assigning to a buffer (in that case it needs to be writable)
+ writable_needed = False
+
+ # Any indexing temp variables that we need to clean up.
+ index_temps = ()
+
+ def analyse_target_types(self, env):
+ return self.analyse_types(env, getting=False)
+
+ def analyse_types(self, env, getting=True):
+ """
+ Analyse types for buffer indexing only. Overridden by memoryview
+ indexing and slicing subclasses
+ """
+ # self.indices are already analyzed
+ if not self.base.is_name and not is_pythran_expr(self.base.type):
+ error(self.pos, "Can only index buffer variables")
+ self.type = error_type
+ return self
+
+ if not getting:
+ if not self.base.entry.type.writable:
+ error(self.pos, "Writing to readonly buffer")
+ else:
+ self.writable_needed = True
+ if self.base.type.is_buffer:
+ self.base.entry.buffer_aux.writable_needed = True
+
+ self.none_error_message = "'NoneType' object is not subscriptable"
+ self.analyse_buffer_index(env, getting)
+ self.wrap_in_nonecheck_node(env)
+ return self
+
+ def analyse_buffer_index(self, env, getting):
+ if is_pythran_expr(self.base.type):
+ index_with_type_list = [(idx, idx.type) for idx in self.indices]
+ self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
+ else:
+ self.base = self.base.coerce_to_simple(env)
+ self.type = self.base.type.dtype
+ self.buffer_type = self.base.type
+
+ if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
+ self.is_temp = True
+
+ def analyse_assignment(self, rhs):
+ """
+ Called by IndexNode when this node is assigned to,
+ with the rhs of the assignment
+ """
+
+ def wrap_in_nonecheck_node(self, env):
+ if not env.directives['nonecheck'] or not self.base.may_be_none():
+ return
+ self.base = self.base.as_none_safe_node(self.none_error_message)
+
+ def nogil_check(self, env):
+ if self.is_buffer_access or self.is_memview_index:
+ if self.type.is_pyobject:
+ error(self.pos, "Cannot access buffer with object dtype without gil")
+ self.type = error_type
+
+ def calculate_result_code(self):
+ return "(*%s)" % self.buffer_ptr_code
+
+ def buffer_entry(self):
+ base = self.base
+ if self.base.is_nonecheck:
+ base = base.arg
+ return base.type.get_entry(base)
+
+ def get_index_in_temp(self, code, ivar):
+ ret = code.funcstate.allocate_temp(
+ PyrexTypes.widest_numeric_type(
+ ivar.type,
+ PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
+ manage_ref=False)
+ code.putln("%s = %s;" % (ret, ivar.result()))
+ return ret
+
+ def buffer_lookup_code(self, code):
+ """
+ ndarray[1, 2, 3] and memslice[1, 2, 3]
+ """
+ if self.in_nogil_context:
+ if self.is_buffer_access or self.is_memview_index:
+ if code.globalstate.directives['boundscheck']:
+ warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+
+ # Assign indices to temps of at least (s)size_t to allow further index calculations.
+ self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
+
+ # Generate buffer access code using these temps
+ from . import Buffer
+ buffer_entry = self.buffer_entry()
+ if buffer_entry.type.is_buffer:
+ negative_indices = buffer_entry.type.negative_indices
+ else:
+ negative_indices = Buffer.buffer_defaults['negative_indices']
+
+ return buffer_entry, Buffer.put_buffer_lookup_code(
+ entry=buffer_entry,
+ index_signeds=[ivar.type.signed for ivar in self.indices],
+ index_cnames=index_temps,
+ directives=code.globalstate.directives,
+ pos=self.pos, code=code,
+ negative_indices=negative_indices,
+ in_nogil_context=self.in_nogil_context)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ self.generate_subexpr_evaluation_code(code)
+ self.generate_buffer_setitem_code(rhs, code)
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def generate_buffer_setitem_code(self, rhs, code, op=""):
+ base_type = self.base.type
+ if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
+ obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
+ # We have to do this because Pythran objects must be declared
+ # at the beginning of the function.
+ # Indeed, Cython uses "goto" statements for error management, and
+ # RAII doesn't work with that kind of construct.
+ # Moreover, Pythran expressions do not easily support
+ # move assignment.
+ # Thus, we explicitly destroy and then placement-new the
+ # objects in this case.
+ code.putln("__Pyx_call_destructor(%s);" % obj)
+ code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
+ code.putln("%s%s %s= %s;" % (
+ obj,
+ pythran_indexing_code(self.indices),
+ op,
+ rhs.pythran_result()))
+ code.funcstate.release_temp(obj)
+ return
+
+ # Used from generate_assignment_code and InPlaceAssignmentNode
+ buffer_entry, ptrexpr = self.buffer_lookup_code(code)
+
+ if self.buffer_type.dtype.is_pyobject:
+ # Must manage refcounts. XDecref what is already there
+ # and incref what we put in (NumPy allows there to be NULL)
+ ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
+ manage_ref=False)
+ rhs_code = rhs.result()
+ code.putln("%s = %s;" % (ptr, ptrexpr))
+ code.put_xgotref("*%s" % ptr)
+ code.putln("__Pyx_INCREF(%s); __Pyx_XDECREF(*%s);" % (
+ rhs_code, ptr))
+ code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
+ code.put_xgiveref("*%s" % ptr)
+ code.funcstate.release_temp(ptr)
+ else:
+ # Simple case
+ code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
+
+ def generate_result_code(self, code):
+ if is_pythran_expr(self.base.type):
+ res = self.result()
+ code.putln("__Pyx_call_destructor(%s);" % res)
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ res,
+ res,
+ self.base.pythran_result(),
+ pythran_indexing_code(self.indices)))
+ return
+ buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
+ if self.type.is_pyobject:
+ # is_temp is True, so must pull out value and incref it.
+ # NOTE: object temporary results for nodes are declared
+ # as PyObject *, so we need a cast
+ res = self.result()
+ code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
+ # NumPy does (occasionally) allow NULL to denote None.
+ code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res))
+ code.putln("__Pyx_INCREF((PyObject*)%s);" % res)
+
+ def free_subexpr_temps(self, code):
+ for temp in self.index_temps:
+ code.funcstate.release_temp(temp)
+ self.index_temps = ()
+ super(BufferIndexNode, self).free_subexpr_temps(code)
+
+
+class MemoryViewIndexNode(BufferIndexNode):
+
+ is_memview_index = True
+ is_buffer_access = False
+ warned_untyped_idx = False
+
+ def analyse_types(self, env, getting=True):
+ # memoryviewslice indexing or slicing
+ from . import MemoryView
+
+ self.is_pythran_mode = has_np_pythran(env)
+ indices = self.indices
+ have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
+
+ if not getting:
+ self.writable_needed = True
+ if self.base.is_name or self.base.is_attribute:
+ self.base.entry.type.writable_needed = True
+
+ self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
+ axes = []
+
+ index_type = PyrexTypes.c_py_ssize_t_type
+ new_indices = []
+
+ if len(indices) - len(newaxes) > self.base.type.ndim:
+ self.type = error_type
+ error(indices[self.base.type.ndim].pos,
+ "Too many indices specified for type %s" % self.base.type)
+ return self
+
+ axis_idx = 0
+ for i, index in enumerate(indices[:]):
+ index = index.analyse_types(env)
+ if index.is_none:
+ self.is_memview_slice = True
+ new_indices.append(index)
+ axes.append(('direct', 'strided'))
+ continue
+
+ access, packing = self.base.type.axes[axis_idx]
+ axis_idx += 1
+
+ if index.is_slice:
+ self.is_memview_slice = True
+ if index.step.is_none:
+ axes.append((access, packing))
+ else:
+ axes.append((access, 'strided'))
+
+ # Coerce start, stop and step to temps of the right type
+ for attr in ('start', 'stop', 'step'):
+ value = getattr(index, attr)
+ if not value.is_none:
+ value = value.coerce_to(index_type, env)
+ #value = value.coerce_to_temp(env)
+ setattr(index, attr, value)
+ new_indices.append(value)
+
+ elif index.type.is_int or index.type.is_pyobject:
+ if index.type.is_pyobject and not self.warned_untyped_idx:
+ warning(index.pos, "Index should be typed for more efficient access", level=2)
+ MemoryViewIndexNode.warned_untyped_idx = True
+
+ self.is_memview_index = True
+ index = index.coerce_to(index_type, env)
+ indices[i] = index
+ new_indices.append(index)
+
+ else:
+ self.type = error_type
+ error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
+ return self
+
+ ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
+ self.is_memview_index = self.is_memview_index and not self.is_memview_slice
+ self.indices = new_indices
+ # All indices with all start/stop/step for slices.
+ # We need to keep this around.
+ self.original_indices = indices
+ self.nogil = env.nogil
+
+ self.analyse_operation(env, getting, axes)
+ self.wrap_in_nonecheck_node(env)
+ return self
+
+ def analyse_operation(self, env, getting, axes):
+ self.none_error_message = "Cannot index None memoryview slice"
+ self.analyse_buffer_index(env, getting)
+
+ def analyse_broadcast_operation(self, rhs):
+ """
+ Support broadcasting for slice assignment.
+ E.g.
+ m_2d[...] = m_1d # or,
+ m_1d[...] = m_2d # if the leading dimension has extent 1
+ """
+ if self.type.is_memoryviewslice:
+ lhs = self
+ if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
+ lhs.is_memview_broadcast = True
+ rhs.is_memview_broadcast = True
+
+ def analyse_as_memview_scalar_assignment(self, rhs):
+ lhs = self.analyse_assignment(rhs)
+ if lhs:
+ rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
+ return lhs
+ return self
+
+
+class MemoryViewSliceNode(MemoryViewIndexNode):
+
+ is_memview_slice = True
+
+ # No-op slicing operation, this node will be replaced
+ is_ellipsis_noop = False
+ is_memview_scalar_assignment = False
+ is_memview_index = False
+ is_memview_broadcast = False
+
+ def analyse_ellipsis_noop(self, env, getting):
+ """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
+ ### FIXME: replace directly
+ self.is_ellipsis_noop = all(
+ index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
+ for index in self.indices)
+
+ if self.is_ellipsis_noop:
+ self.type = self.base.type
+
+ def analyse_operation(self, env, getting, axes):
+ from . import MemoryView
+
+ if not getting:
+ self.is_memview_broadcast = True
+ self.none_error_message = "Cannot assign to None memoryview slice"
+ else:
+ self.none_error_message = "Cannot slice None memoryview slice"
+
+ self.analyse_ellipsis_noop(env, getting)
+ if self.is_ellipsis_noop:
+ return
+
+ self.index = None
+ self.is_temp = True
+ self.use_managed_ref = True
+
+ if not MemoryView.validate_axes(self.pos, axes):
+ self.type = error_type
+ return
+
+ self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
+
+ if not (self.base.is_simple() or self.base.result_in_temp()):
+ self.base = self.base.coerce_to_temp(env)
+
+ def analyse_assignment(self, rhs):
+ if not rhs.type.is_memoryviewslice and (
+ self.type.dtype.assignable_from(rhs.type) or
+ rhs.type.is_pyobject):
+ # scalar assignment
+ return MemoryCopyScalar(self.pos, self)
+ else:
+ return MemoryCopySlice(self.pos, self)
+
+ def merged_indices(self, indices):
+ """Return a new list of indices/slices with 'indices' merged into the current ones
+ according to slicing rules.
+ Is used to implement "view[i][j]" => "view[i, j]".
+ Return None if the indices cannot (easily) be merged at compile time.
+ """
+ if not indices:
+ return None
+ # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
+ new_indices = self.original_indices[:]
+ indices = indices[:]
+ for i, s in enumerate(self.original_indices):
+ if s.is_slice:
+ if s.start.is_none and s.stop.is_none and s.step.is_none:
+ # Full slice found, replace by index.
+ new_indices[i] = indices[0]
+ indices.pop(0)
+ if not indices:
+ return new_indices
+ else:
+ # Found something non-trivial, e.g. a partial slice.
+ return None
+ elif not s.type.is_int:
+ # Not a slice, not an integer index => could be anything...
+ return None
+ if indices:
+ if len(new_indices) + len(indices) > self.base.type.ndim:
+ return None
+ new_indices += indices
+ return new_indices
+
+ def is_simple(self):
+ if self.is_ellipsis_noop:
+ # TODO: fix SimpleCallNode.is_simple()
+ return self.base.is_simple() or self.base.result_in_temp()
+
+ return self.result_in_temp()
+
+ def calculate_result_code(self):
+ """This is called in case this is a no-op slicing node"""
+ return self.base.result()
+
+ def generate_result_code(self, code):
+ if self.is_ellipsis_noop:
+ return ### FIXME: remove
+ buffer_entry = self.buffer_entry()
+ have_gil = not self.in_nogil_context
+
+ # TODO Mark: this is insane, do it better
+ have_slices = False
+ it = iter(self.indices)
+ for index in self.original_indices:
+ if index.is_slice:
+ have_slices = True
+ if not index.start.is_none:
+ index.start = next(it)
+ if not index.stop.is_none:
+ index.stop = next(it)
+ if not index.step.is_none:
+ index.step = next(it)
+ else:
+ next(it)
+
+ assert not list(it)
+
+ buffer_entry.generate_buffer_slice_code(
+ code, self.original_indices, self.result(),
+ have_gil=have_gil, have_slices=have_slices,
+ directives=code.globalstate.directives)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ if self.is_ellipsis_noop:
+ self.generate_subexpr_evaluation_code(code)
+ else:
+ self.generate_evaluation_code(code)
+
+ if self.is_memview_scalar_assignment:
+ self.generate_memoryviewslice_assign_scalar_code(rhs, code)
+ else:
+ self.generate_memoryviewslice_setslice_code(rhs, code)
+
+ if self.is_ellipsis_noop:
+ self.generate_subexpr_disposal_code(code)
+ else:
+ self.generate_disposal_code(code)
+
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+
+class MemoryCopyNode(ExprNode):
+ """
+ Wraps a memoryview slice for slice assignment.
+
+ dst: destination memoryview slice
+ """
+
+ subexprs = ['dst']
+
+ def __init__(self, pos, dst):
+ super(MemoryCopyNode, self).__init__(pos)
+ self.dst = dst
+ self.type = dst.type
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ self.dst.generate_evaluation_code(code)
+ self._generate_assignment_code(rhs, code)
+ self.dst.generate_disposal_code(code)
+ self.dst.free_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+
+class MemoryCopySlice(MemoryCopyNode):
+ """
+ Copy the contents of slice src to slice dst. Does not support indirect
+ slices.
+
+ memslice1[...] = memslice2
+ memslice1[:] = memslice2
+ """
+
+ is_memview_copy_assignment = True
+ copy_slice_cname = "__pyx_memoryview_copy_contents"
+
+ def _generate_assignment_code(self, src, code):
+ dst = self.dst
+
+ src.type.assert_direct_dims(src.pos)
+ dst.type.assert_direct_dims(dst.pos)
+
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
+ src.result(), dst.result(),
+ src.type.ndim, dst.type.ndim,
+ dst.type.dtype.is_pyobject),
+ dst.pos))
+
+
+class MemoryCopyScalar(MemoryCopyNode):
+ """
+ Assign a scalar to a slice. dst must be simple; the scalar is
+ coerced to the slice's exact dtype, not merely to an assignable type.
+
+ memslice1[...] = 0.0
+ memslice1[:] = 0.0
+ """
+
+ def __init__(self, pos, dst):
+ super(MemoryCopyScalar, self).__init__(pos, dst)
+ self.type = dst.type.dtype
+
+ def _generate_assignment_code(self, scalar, code):
+ from . import MemoryView
+
+ self.dst.type.assert_direct_dims(self.dst.pos)
+
+ dtype = self.dst.type.dtype
+ type_decl = dtype.declaration_code("")
+ slice_decl = self.dst.type.declaration_code("")
+
+ code.begin_block()
+ code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
+ if self.dst.result_in_temp() or self.dst.is_simple():
+ dst_temp = self.dst.result()
+ else:
+ code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
+ dst_temp = "__pyx_temp_slice"
+
+ slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
+ self.dst.type.ndim, code)
+ p = slice_iter_obj.start_loops()
+
+ if dtype.is_pyobject:
+ code.putln("Py_DECREF(*(PyObject **) %s);" % p)
+
+ code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
+
+ if dtype.is_pyobject:
+ code.putln("Py_INCREF(__pyx_temp_scalar);")
+
+ slice_iter_obj.end_loops()
+ code.end_block()
+
+
+class SliceIndexNode(ExprNode):
+ # 2-element slice indexing
+ #
+ # base ExprNode
+ # start ExprNode or None
+ # stop ExprNode or None
+ # slice ExprNode or None constant slice object
+
+ subexprs = ['base', 'start', 'stop', 'slice']
+
+ slice = None
+
+ def infer_type(self, env):
+ base_type = self.base.infer_type(env)
+ if base_type.is_string or base_type.is_cpp_class:
+ return bytes_type
+ elif base_type.is_pyunicode_ptr:
+ return unicode_type
+ elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return base_type
+ elif base_type.is_ptr or base_type.is_array:
+ return PyrexTypes.c_array_type(base_type.base_type, None)
+ return py_object_type
+
+ def inferable_item_node(self, index=0):
+ # slicing shouldn't change the result type of the base, but the index might
+ if index is not not_a_constant and self.start:
+ if self.start.has_constant_result():
+ index += self.start.constant_result
+ else:
+ index = not_a_constant
+ return self.base.inferable_item_node(index)
+
+ def may_be_none(self):
+ base_type = self.base.type
+ if base_type:
+ if base_type.is_string:
+ return False
+ if base_type in (bytes_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def calculate_constant_result(self):
+ if self.start is None:
+ start = None
+ else:
+ start = self.start.constant_result
+ if self.stop is None:
+ stop = None
+ else:
+ stop = self.stop.constant_result
+ self.constant_result = self.base.constant_result[start:stop]
+
+ def compile_time_value(self, denv):
+ base = self.base.compile_time_value(denv)
+ if self.start is None:
+ start = 0
+ else:
+ start = self.start.compile_time_value(denv)
+ if self.stop is None:
+ stop = None
+ else:
+ stop = self.stop.compile_time_value(denv)
+ try:
+ return base[start:stop]
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def analyse_target_types(self, env):
+ node = self.analyse_types(env, getting=False)
+ # when assigning, we must accept any Python type
+ if node.type.is_pyobject:
+ node.type = py_object_type
+ return node
+
+ def analyse_types(self, env, getting=True):
+ self.base = self.base.analyse_types(env)
+
+ if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
+ none_node = NoneNode(self.pos)
+ index = SliceNode(self.pos,
+ start=self.start or none_node,
+ stop=self.stop or none_node,
+ step=none_node)
+ index_node = IndexNode(self.pos, index=index, base=self.base)
+ return index_node.analyse_base_and_index_types(
+ env, getting=getting, setting=not getting,
+ analyse_base=False)
+
+ if self.start:
+ self.start = self.start.analyse_types(env)
+ if self.stop:
+ self.stop = self.stop.analyse_types(env)
+
+ if not env.directives['wraparound']:
+ check_negative_indices(self.start, self.stop)
+
+ base_type = self.base.type
+ if base_type.is_array and not getting:
+ # cannot assign directly to C array => try to assign by making a copy
+ if not self.start and not self.stop:
+ self.type = base_type
+ else:
+ self.type = PyrexTypes.CPtrType(base_type.base_type)
+ elif base_type.is_string or base_type.is_cpp_string:
+ self.type = default_str_type(env)
+ elif base_type.is_pyunicode_ptr:
+ self.type = unicode_type
+ elif base_type.is_ptr:
+ self.type = base_type
+ elif base_type.is_array:
+ # we need a ptr type here instead of an array type, as
+ # array types can result in invalid type casts in the C
+ # code
+ self.type = PyrexTypes.CPtrType(base_type.base_type)
+ else:
+ self.base = self.base.coerce_to_pyobject(env)
+ self.type = py_object_type
+ if base_type.is_builtin_type:
+ # slicing builtin types returns something of the same type
+ self.type = base_type
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+
+ if self.type is py_object_type:
+ if (not self.start or self.start.is_literal) and \
+ (not self.stop or self.stop.is_literal):
+ # cache the constant slice object, in case we need it
+ none_node = NoneNode(self.pos)
+ self.slice = SliceNode(
+ self.pos,
+ start=copy.deepcopy(self.start or none_node),
+ stop=copy.deepcopy(self.stop or none_node),
+ step=none_node
+ ).analyse_types(env)
+ else:
+ c_int = PyrexTypes.c_py_ssize_t_type
+
+ def allow_none(node, default_value, env):
+ # Coerce to Py_ssize_t, but allow None as meaning the default slice bound.
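+ # Conceptually (illustrative): "start if start is not None else 0" and
+ # "stop if stop is not None else PY_SSIZE_T_MAX", evaluated as Py_ssize_t.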
+ from .UtilNodes import EvalWithTempExprNode, ResultRefNode
+
+ node_ref = ResultRefNode(node)
+ new_expr = CondExprNode(
+ node.pos,
+ true_val=IntNode(
+ node.pos,
+ type=c_int,
+ value=default_value,
+ constant_result=int(default_value) if default_value.isdigit() else not_a_constant,
+ ),
+ false_val=node_ref.coerce_to(c_int, env),
+ test=PrimaryCmpNode(
+ node.pos,
+ operand1=node_ref,
+ operator='is',
+ operand2=NoneNode(node.pos),
+ ).analyse_types(env)
+ ).analyse_result_type(env)
+ return EvalWithTempExprNode(node_ref, new_expr)
+
+ if self.start:
+ if self.start.type.is_pyobject:
+ self.start = allow_none(self.start, '0', env)
+ self.start = self.start.coerce_to(c_int, env)
+ if self.stop:
+ if self.stop.type.is_pyobject:
+ self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env)
+ self.stop = self.stop.coerce_to(c_int, env)
+ self.is_temp = 1
+ return self
+
+ def analyse_as_type(self, env):
+ base_type = self.base.analyse_as_type(env)
+ if base_type and not base_type.is_pyobject:
+ if not self.start and not self.stop:
+ # memory view
+ from . import MemoryView
+ env.use_utility_code(MemoryView.view_utility_code)
+ none_node = NoneNode(self.pos)
+ slice_node = SliceNode(
+ self.pos,
+ start=none_node,
+ stop=none_node,
+ step=none_node,
+ )
+ return PyrexTypes.MemoryViewSliceType(
+ base_type, MemoryView.get_axes_specs(env, [slice_node]))
+ return None
+
+ nogil_check = Node.gil_error
+ gil_message = "Slicing Python object"
+
+ get_slice_utility_code = TempitaUtilityCode.load(
+ "SliceObject", "ObjectHandling.c", context={'access': 'Get'})
+
+ set_slice_utility_code = TempitaUtilityCode.load(
+ "SliceObject", "ObjectHandling.c", context={'access': 'Set'})
+
+ def coerce_to(self, dst_type, env):
+ if ((self.base.type.is_string or self.base.type.is_cpp_string)
+ and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
+ if (dst_type not in (bytes_type, bytearray_type)
+ and not env.directives['c_string_encoding']):
+ error(self.pos,
+ "default encoding required for conversion from '%s' to '%s'" %
+ (self.base.type, dst_type))
+ self.type = dst_type
+ if dst_type.is_array and self.base.type.is_array:
+ if not self.start and not self.stop:
+ # redundant slice building, copy C arrays directly
+ return self.base.coerce_to(dst_type, env)
+ # else: check array size if possible
+ return super(SliceIndexNode, self).coerce_to(dst_type, env)
+
+ def generate_result_code(self, code):
+ if not self.type.is_pyobject:
+ error(self.pos,
+ "Slicing is not currently supported for '%s'." % self.type)
+ return
+
+ base_result = self.base.result()
+ result = self.result()
+ start_code = self.start_code()
+ stop_code = self.stop_code()
+ if self.base.type.is_string:
+ base_result = self.base.result()
+ if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
+ base_result = '((const char*)%s)' % base_result
+ if self.type is bytearray_type:
+ type_name = 'ByteArray'
+ else:
+ type_name = self.type.name.title()
+ if self.stop is None:
+ code.putln(
+ "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
+ result,
+ type_name,
+ base_result,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ code.putln(
+ "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
+ result,
+ type_name,
+ base_result,
+ start_code,
+ stop_code,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ elif self.base.type.is_pyunicode_ptr:
+ base_result = self.base.result()
+ if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
+ base_result = '((const Py_UNICODE*)%s)' % base_result
+ if self.stop is None:
+ code.putln(
+ "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ code.putln(
+ "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ stop_code,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+
+ elif self.base.type is unicode_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
+ code.putln(
+ "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ stop_code,
+ code.error_goto_if_null(result, self.pos)))
+ elif self.type is py_object_type:
+ code.globalstate.use_utility_code(self.get_slice_utility_code)
+ (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice) = self.get_slice_config()
+ code.putln(
+ "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
+ result,
+ self.base.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound']),
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ if self.base.type is list_type:
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
+ cfunc = '__Pyx_PyList_GetSlice'
+ elif self.base.type is tuple_type:
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
+ cfunc = '__Pyx_PyTuple_GetSlice'
+ else:
+ cfunc = 'PySequence_GetSlice'
+ code.putln(
+ "%s = %s(%s, %s, %s); %s" % (
+ result,
+ cfunc,
+ self.base.py_result(),
+ start_code,
+ stop_code,
+ code.error_goto_if_null(result, self.pos)))
+ code.put_gotref(self.py_result())
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.generate_subexpr_evaluation_code(code)
+ if self.type.is_pyobject:
+ code.globalstate.use_utility_code(self.set_slice_utility_code)
+ (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice) = self.get_slice_config()
+ code.put_error_if_neg(self.pos,
+ "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
+ self.base.py_result(),
+ rhs.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound'])))
+ else:
+ start_offset = self.start_code() if self.start else '0'
+ if rhs.type.is_array:
+ array_length = rhs.type.size
+ self.generate_slice_guard_code(code, array_length)
+ else:
+ array_length = '%s - %s' % (self.stop_code(), start_offset)
+
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
+ self.base.result(), start_offset,
+ rhs.result(),
+ self.base.result(), array_length
+ ))
+
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ if not self.base.type.is_pyobject:
+ error(self.pos,
+ "Deleting slices is only supported for Python types, not '%s'." % self.type)
+ return
+ self.generate_subexpr_evaluation_code(code)
+ code.globalstate.use_utility_code(self.set_slice_utility_code)
+ (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice) = self.get_slice_config()
+ code.put_error_if_neg(self.pos,
+ "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
+ self.base.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound'])))
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+ def get_slice_config(self):
+ has_c_start, c_start, py_start = False, '0', 'NULL'
+ if self.start:
+ has_c_start = not self.start.type.is_pyobject
+ if has_c_start:
+ c_start = self.start.result()
+ else:
+ py_start = '&%s' % self.start.py_result()
+ has_c_stop, c_stop, py_stop = False, '0', 'NULL'
+ if self.stop:
+ has_c_stop = not self.stop.type.is_pyobject
+ if has_c_stop:
+ c_stop = self.stop.result()
+ else:
+ py_stop = '&%s' % self.stop.py_result()
+ py_slice = '&%s' % self.slice.py_result() if self.slice else 'NULL'
+ return (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice)
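+
+ # Added illustration (hypothetical cnames): for "obj[2:n]" with both bounds
+ # already coerced to Py_ssize_t, get_slice_config() returns
+ #
+ # (True, True, '2', '__pyx_v_n', 'NULL', 'NULL', 'NULL')
+ #
+ # i.e. C bounds are passed by value, Python bounds by address, and py_slice
+ # is only set when analyse_types() cached a constant slice object.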
+
+ def generate_slice_guard_code(self, code, target_size):
+ if not self.base.type.is_array:
+ return
+ slice_size = self.base.type.size
+ try:
+ total_length = slice_size = int(slice_size)
+ except ValueError:
+ total_length = None
+
+ start = stop = None
+ if self.stop:
+ stop = self.stop.result()
+ try:
+ stop = int(stop)
+ if stop < 0:
+ if total_length is None:
+ slice_size = '%s + %d' % (slice_size, stop)
+ else:
+ slice_size += stop
+ else:
+ slice_size = stop
+ stop = None
+ except ValueError:
+ pass
+
+ if self.start:
+ start = self.start.result()
+ try:
+ start = int(start)
+ if start < 0:
+ if total_length is None:
+ start = '%s + %d' % (self.base.type.size, start)
+ else:
+ start += total_length
+ if isinstance(slice_size, _py_int_types):
+ slice_size -= start
+ else:
+ slice_size = '%s - (%s)' % (slice_size, start)
+ start = None
+ except ValueError:
+ pass
+
+ runtime_check = None
+ compile_time_check = False
+ try:
+ int_target_size = int(target_size)
+ except ValueError:
+ int_target_size = None
+ else:
+ compile_time_check = isinstance(slice_size, _py_int_types)
+
+ if compile_time_check and slice_size < 0:
+ if int_target_size > 0:
+ error(self.pos, "Assignment to empty slice.")
+ elif compile_time_check and start is None and stop is None:
+ # we know the exact slice length
+ if int_target_size != slice_size:
+ error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
+ slice_size, target_size))
+ elif start is not None:
+ if stop is None:
+ stop = slice_size
+ runtime_check = "(%s)-(%s)" % (stop, start)
+ elif stop is not None:
+ runtime_check = stop
+ else:
+ runtime_check = slice_size
+
+ if runtime_check:
+ code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
+ code.putln(
+ 'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
+ ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
+ ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
+ target_size, runtime_check))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ def start_code(self):
+ if self.start:
+ return self.start.result()
+ else:
+ return "0"
+
+ def stop_code(self):
+ if self.stop:
+ return self.stop.result()
+ elif self.base.type.is_array:
+ return self.base.type.size
+ else:
+ return "PY_SSIZE_T_MAX"
+
+ def calculate_result_code(self):
+ # self.result() is not used, but this method must exist
+ return "<unused>"
+
+
+class SliceNode(ExprNode):
+ # start:stop:step in subscript list
+ #
+ # start ExprNode
+ # stop ExprNode
+ # step ExprNode
+
+ subexprs = ['start', 'stop', 'step']
+ is_slice = True
+ type = slice_type
+ is_temp = 1
+
+ def calculate_constant_result(self):
+ self.constant_result = slice(
+ self.start.constant_result,
+ self.stop.constant_result,
+ self.step.constant_result)
+
+ def compile_time_value(self, denv):
+ start = self.start.compile_time_value(denv)
+ stop = self.stop.compile_time_value(denv)
+ step = self.step.compile_time_value(denv)
+ try:
+ return slice(start, stop, step)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ start = self.start.analyse_types(env)
+ stop = self.stop.analyse_types(env)
+ step = self.step.analyse_types(env)
+ self.start = start.coerce_to_pyobject(env)
+ self.stop = stop.coerce_to_pyobject(env)
+ self.step = step.coerce_to_pyobject(env)
+ if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+ self.is_literal = True
+ self.is_temp = False
+ return self
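+
+ # Added note: a fully literal slice such as "obj[::2]" is flagged is_literal
+ # above, so generate_result_code() below emits it once as a cached,
+ # deduplicated module-level constant instead of calling PySlice_New()
+ # on every evaluation.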
+
+ gil_message = "Constructing Python slice object"
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def generate_result_code(self, code):
+ if self.is_literal:
+ dedup_key = make_dedup_key(self.type, (self,))
+ self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key)
+ code = code.get_cached_constants_writer(self.result_code)
+ if code is None:
+ return # already initialised
+ code.mark_pos(self.pos)
+
+ code.putln(
+ "%s = PySlice_New(%s, %s, %s); %s" % (
+ self.result(),
+ self.start.py_result(),
+ self.stop.py_result(),
+ self.step.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ if self.is_literal:
+ code.put_giveref(self.py_result())
+
+
+class SliceIntNode(SliceNode):
+ # start:stop:step in subscript list
+ # This is just a node to hold the start, stop and step nodes that can be
+ # converted to integers. It does not generate a Python slice object.
+ #
+ # start ExprNode
+ # stop ExprNode
+ # step ExprNode
+
+ is_temp = 0
+
+ def calculate_constant_result(self):
+ self.constant_result = slice(
+ self.start.constant_result,
+ self.stop.constant_result,
+ self.step.constant_result)
+
+ def compile_time_value(self, denv):
+ start = self.start.compile_time_value(denv)
+ stop = self.stop.compile_time_value(denv)
+ step = self.step.compile_time_value(denv)
+ try:
+ return slice(start, stop, step)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ self.start = self.start.analyse_types(env)
+ self.stop = self.stop.analyse_types(env)
+ self.step = self.step.analyse_types(env)
+
+ if not self.start.is_none:
+ self.start = self.start.coerce_to_integer(env)
+ if not self.stop.is_none:
+ self.stop = self.stop.coerce_to_integer(env)
+ if not self.step.is_none:
+ self.step = self.step.coerce_to_integer(env)
+
+ if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+ self.is_literal = True
+ self.is_temp = False
+ return self
+
+ def calculate_result_code(self):
+ pass
+
+ def generate_result_code(self, code):
+ for a in self.start,self.stop,self.step:
+ if isinstance(a, CloneNode):
+ a.arg.result()
+
+
+class CallNode(ExprNode):
+
+ # allow overriding the default 'may_be_none' behaviour
+ may_return_none = None
+
+ def infer_type(self, env):
+ # TODO(robertwb): Reduce redundancy with analyse_types.
+ function = self.function
+ func_type = function.infer_type(env)
+ if isinstance(function, NewExprNode):
+ # note: needs call to infer_type() above
+ return PyrexTypes.CPtrType(function.class_type)
+ if func_type is py_object_type:
+ # function might have lied for safety => try to find better type
+ entry = getattr(function, 'entry', None)
+ if entry is not None:
+ func_type = entry.type or func_type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ if func_type.is_cfunction:
+ if getattr(self.function, 'entry', None) and hasattr(self, 'args'):
+ alternatives = self.function.entry.all_alternatives()
+ arg_types = [arg.infer_type(env) for arg in self.args]
+ func_entry = PyrexTypes.best_match(arg_types, alternatives)
+ if func_entry:
+ func_type = func_entry.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ return func_type.return_type
+ return func_type.return_type
+ elif func_type is type_type:
+ if function.is_name and function.entry and function.entry.type:
+ result_type = function.entry.type
+ if result_type.is_extension_type:
+ return result_type
+ elif result_type.is_builtin_type:
+ if function.entry.name == 'float':
+ return PyrexTypes.c_double_type
+ elif function.entry.name in Builtin.types_that_construct_their_instance:
+ return result_type
+ return py_object_type
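+
+ # Added illustration of the inference fallbacks above (a hedged sketch):
+ #
+ # x = float(s) # builtin float() call: inferred as C double
+ # l = list(seq) # a type that constructs its own instance: inferred as list
+ # y = py_func() # plain Python callable: stays py_object_type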
+
+ def type_dependencies(self, env):
+ # TODO: Update when Danilo's C++ code is merged in, to handle
+ # the case of function overloading.
+ return self.function.type_dependencies(env)
+
+ def is_simple(self):
+ # C function calls could be considered simple, but they may
+ # have side effects that matter when multiple operations must
+ # take effect in order, e.g. when constructing the argument
+ # sequence for a function call or comparing values.
+ return False
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ func_type = self.function.type
+ if func_type is type_type and self.function.is_name:
+ entry = self.function.entry
+ if entry.type.is_extension_type:
+ return False
+ if (entry.type.is_builtin_type and
+ entry.name in Builtin.types_that_construct_their_instance):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def set_py_result_type(self, function, func_type=None):
+ if func_type is None:
+ func_type = function.type
+ if func_type is Builtin.type_type and (
+ function.is_name and
+ function.entry and
+ function.entry.is_builtin and
+ function.entry.name in Builtin.types_that_construct_their_instance):
+ # calling a builtin type that returns a specific object type
+ if function.entry.name == 'float':
+ # the following will be made true later on by a transform
+ self.type = PyrexTypes.c_double_type
+ self.result_ctype = PyrexTypes.c_double_type
+ else:
+ self.type = Builtin.builtin_types[function.entry.name]
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ elif function.is_name and function.type_entry:
+ # We are calling an extension type constructor. As long as we do not
+ # support __new__(), the result type is clear
+ self.type = function.type_entry.type
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ else:
+ self.type = py_object_type
+
+ def analyse_as_type_constructor(self, env):
+ type = self.function.analyse_as_type(env)
+ if type and type.is_struct_or_union:
+ args, kwds = self.explicit_args_kwds()
+ items = []
+ for arg, member in zip(args, type.scope.var_entries):
+ items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
+ if kwds:
+ items += kwds.key_value_pairs
+ self.key_value_pairs = items
+ self.__class__ = DictNode
+ self.analyse_types(env) # FIXME
+ self.coerce_to(type, env)
+ return True
+ elif type and type.is_cpp_class:
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+ constructor = type.scope.lookup("<init>")
+ if not constructor:
+ error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
+ self.type = error_type
+ return self
+ self.function = RawCNameExprNode(self.function.pos, constructor.type)
+ self.function.entry = constructor
+ self.function.set_cname(type.empty_declaration_code())
+ self.analyse_c_function_call(env)
+ self.type = type
+ return True
+
+ def is_lvalue(self):
+ return self.type.is_reference
+
+ def nogil_check(self, env):
+ func_type = self.function_type()
+ if func_type.is_pyobject:
+ self.gil_error()
+ elif not func_type.is_error and not getattr(func_type, 'nogil', False):
+ self.gil_error()
+
+ gil_message = "Calling gil-requiring function"
+
+
+class SimpleCallNode(CallNode):
+ # Function call without keyword, * or ** args.
+ #
+ # function ExprNode
+ # args [ExprNode]
+ # arg_tuple ExprNode or None used internally
+ # self ExprNode or None used internally
+ # coerced_self ExprNode or None used internally
+ # wrapper_call bool used internally
+ # has_optional_args bool used internally
+ # nogil bool used internally
+
+ subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
+
+ self = None
+ coerced_self = None
+ arg_tuple = None
+ wrapper_call = False
+ has_optional_args = False
+ nogil = False
+ analysed = False
+ overflowcheck = False
+
+ def compile_time_value(self, denv):
+ function = self.function.compile_time_value(denv)
+ args = [arg.compile_time_value(denv) for arg in self.args]
+ try:
+ return function(*args)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def analyse_as_type(self, env):
+ attr = self.function.as_cython_attribute()
+ if attr == 'pointer':
+ if len(self.args) != 1:
+ error(self.args.pos, "only one type allowed.")
+ else:
+ type = self.args[0].analyse_as_type(env)
+ if not type:
+ error(self.args[0].pos, "Unknown type")
+ else:
+ return PyrexTypes.CPtrType(type)
+ elif attr == 'typeof':
+ if len(self.args) != 1:
+ error(self.args.pos, "only one type allowed.")
+ operand = self.args[0].analyse_types(env)
+ return operand.type
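+
+ # Added illustration of the two Cython attributes special-cased above:
+ #
+ # cython.pointer(int) # analysed as the C pointer type "int *"
+ # cython.typeof(x) # analysed as the static type of x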
+
+ def explicit_args_kwds(self):
+ return self.args, None
+
+ def analyse_types(self, env):
+ if self.analyse_as_type_constructor(env):
+ return self
+ if self.analysed:
+ return self
+ self.analysed = True
+ self.function.is_called = 1
+ self.function = self.function.analyse_types(env)
+ function = self.function
+
+ if function.is_attribute and function.entry and function.entry.is_cmethod:
+ # Take ownership of the object from which the attribute
+ # was obtained, because we need to pass it as 'self'.
+ self.self = function.obj
+ function.obj = CloneNode(self.self)
+
+ func_type = self.function_type()
+ self.is_numpy_call_with_exprs = False
+ if (has_np_pythran(env) and function.is_numpy_attribute and
+ pythran_is_numpy_func_supported(function)):
+ has_pythran_args = True
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
+ self.arg_tuple = self.arg_tuple.analyse_types(env)
+ for arg in self.arg_tuple.args:
+ has_pythran_args &= is_pythran_supported_node_or_none(arg)
+ self.is_numpy_call_with_exprs = bool(has_pythran_args)
+ if self.is_numpy_call_with_exprs:
+ env.add_include_file(pythran_get_func_include_file(function))
+ return NumPyMethodCallNode.from_node(
+ self,
+ function_cname=pythran_functor(function),
+ arg_tuple=self.arg_tuple,
+ type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)),
+ )
+ elif func_type.is_pyobject:
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
+ self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
+ self.args = None
+ self.set_py_result_type(function, func_type)
+ self.is_temp = 1
+ else:
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+ self.analyse_c_function_call(env)
+ if func_type.exception_check == '+':
+ self.is_temp = True
+ return self
+
+ def function_type(self):
+ # Return the type of the function being called, coercing a function
+ # pointer to a function if necessary. If the function has fused
+ # arguments, return the specific type.
+ func_type = self.function.type
+
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+
+ return func_type
+
+ def analyse_c_function_call(self, env):
+ func_type = self.function.type
+ if func_type is error_type:
+ self.type = error_type
+ return
+
+ if func_type.is_cfunction and func_type.is_static_method:
+ if self.self and self.self.type.is_extension_type:
+ # To support this we'd need to pass self to determine whether
+ # it was overloaded in Python space (possibly via a Cython
+ # superclass turning a cdef method into a cpdef one).
+ error(self.pos, "Cannot call a static method on an instance variable.")
+ args = self.args
+ elif self.self:
+ args = [self.self] + self.args
+ else:
+ args = self.args
+
+ if func_type.is_cpp_class:
+ overloaded_entry = self.function.type.scope.lookup("operator()")
+ if overloaded_entry is None:
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
+ elif hasattr(self.function, 'entry'):
+ overloaded_entry = self.function.entry
+ elif self.function.is_subscript and self.function.is_fused_index:
+ overloaded_entry = self.function.type.entry
+ else:
+ overloaded_entry = None
+
+ if overloaded_entry:
+ if self.function.type.is_fused:
+ functypes = self.function.type.get_all_specialized_function_types()
+ alternatives = [f.entry for f in functypes]
+ else:
+ alternatives = overloaded_entry.all_alternatives()
+
+ entry = PyrexTypes.best_match(
+ [arg.type for arg in args], alternatives, self.pos, env, args)
+
+ if not entry:
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
+
+ entry.used = True
+ if not func_type.is_cpp_class:
+ self.function.entry = entry
+ self.function.type = entry.type
+ func_type = self.function_type()
+ else:
+ entry = None
+ func_type = self.function_type()
+ if not func_type.is_cfunction:
+ error(self.pos, "Calling non-function type '%s'" % func_type)
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
+
+ # Check no. of args
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
+ actual_nargs = len(args)
+ if func_type.optional_arg_count and expected_nargs != actual_nargs:
+ self.has_optional_args = 1
+ self.is_temp = 1
+
+ # check 'self' argument
+ if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
+ formal_arg = func_type.args[0]
+ arg = args[0]
+ if formal_arg.not_none:
+ if self.self:
+ self.self = self.self.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
+ error='PyExc_AttributeError',
+ format_args=[entry.name])
+ else:
+ # unbound method
+ arg = arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[entry.name, formal_arg.type.name])
+ if self.self:
+ if formal_arg.accept_builtin_subtypes:
+ arg = CMethodSelfCloneNode(self.self)
+ else:
+ arg = CloneNode(self.self)
+ arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
+ elif formal_arg.type.is_builtin_type:
+ # special case: unbound methods of builtins accept subtypes
+ arg = arg.coerce_to(formal_arg.type, env)
+ if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
+ arg.exact_builtin_type = False
+ args[0] = arg
+
+ # Coerce arguments
+ some_args_in_temps = False
+ for i in range(min(max_nargs, actual_nargs)):
+ formal_arg = func_type.args[i]
+ formal_type = formal_arg.type
+ arg = args[i].coerce_to(formal_type, env)
+ if formal_arg.not_none:
+ # C methods must do the None checks at *call* time
+ arg = arg.as_none_safe_node(
+ "cannot pass None into a C function argument that is declared 'not None'")
+ if arg.is_temp:
+ if i > 0:
+ # first argument in temp doesn't impact subsequent arguments
+ some_args_in_temps = True
+ elif arg.type.is_pyobject and not env.nogil:
+ if i == 0 and self.self is not None:
+ # a method's cloned "self" argument is ok
+ pass
+ elif arg.nonlocally_immutable():
+ # plain local variables are ok
+ pass
+ else:
+ # we do not safely own the argument's reference,
+ # but we must make sure it cannot be collected
+ # before we return from the function, so we create
+ # an owned temp reference to it
+ if i > 0: # first argument doesn't matter
+ some_args_in_temps = True
+ arg = arg.coerce_to_temp(env)
+ args[i] = arg
+
+ # handle additional varargs parameters
+ for i in range(max_nargs, actual_nargs):
+ arg = args[i]
+ if arg.type.is_pyobject:
+ if arg.type is str_type:
+ arg_ctype = PyrexTypes.c_char_ptr_type
+ else:
+ arg_ctype = arg.type.default_coerced_ctype()
+ if arg_ctype is None:
+ error(self.args[i].pos,
+ "Python object cannot be passed as a varargs parameter")
+ else:
+ args[i] = arg = arg.coerce_to(arg_ctype, env)
+ if arg.is_temp and i > 0:
+ some_args_in_temps = True
+
+ if some_args_in_temps:
+ # if some args are temps and others are not, they may get
+ # constructed in the wrong order (temps first) => make
+ # sure they are either all temps or all not temps (except
+ # for the last argument, which is evaluated last in any
+ # case)
+ for i in range(actual_nargs-1):
+ if i == 0 and self.self is not None:
+ continue # self is ok
+ arg = args[i]
+ if arg.nonlocally_immutable():
+ # locals, C functions, unassignable types are safe.
+ pass
+ elif arg.type.is_cpp_class:
+ # Assignment has side effects, avoid.
+ pass
+ elif env.nogil and arg.type.is_pyobject:
+ # can't copy a Python reference into a temp in nogil
+ # env (this is safe: a construction would fail in
+ # nogil anyway)
+ pass
+ else:
+ #self.args[i] = arg.coerce_to_temp(env)
+ # instead: issue a warning
+ if i > 0 or i == 1 and self.self is not None: # skip first arg
+ warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
+ break
+
+ self.args[:] = args
+
+ # Calc result type and code fragment
+ if isinstance(self.function, NewExprNode):
+ self.type = PyrexTypes.CPtrType(self.function.class_type)
+ else:
+ self.type = func_type.return_type
+
+ if self.function.is_name or self.function.is_attribute:
+ func_entry = self.function.entry
+ if func_entry and (func_entry.utility_code or func_entry.utility_code_definition):
+ self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
+
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+ self.is_temp = 1
+ elif func_type.exception_value is not None or func_type.exception_check:
+ self.is_temp = 1
+ elif self.type.is_memoryviewslice:
+ self.is_temp = 1
+ # func_type.exception_check = True
+
+ if self.is_temp and self.type.is_reference:
+ self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
+
+ # Called in 'nogil' context?
+ self.nogil = env.nogil
+ if (self.nogil and
+ func_type.exception_check and
+ func_type.exception_check != '+'):
+ env.use_utility_code(pyerr_occurred_withgil_utility_code)
+ # C++ exception handler
+ if func_type.exception_check == '+':
+ if func_type.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+
+ self.overflowcheck = env.directives['overflowcheck']
+
+ def calculate_result_code(self):
+ return self.c_call_code()
+
+ def c_call_code(self):
+ func_type = self.function_type()
+ if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
+ return "<error>"
+ formal_args = func_type.args
+ arg_list_code = []
+ args = list(zip(formal_args, self.args))
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
+ actual_nargs = len(self.args)
+ for formal_arg, actual_arg in args[:expected_nargs]:
+ arg_code = actual_arg.result_as(formal_arg.type)
+ arg_list_code.append(arg_code)
+
+ if func_type.is_overridable:
+ arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
+
+ if func_type.optional_arg_count:
+ if expected_nargs == actual_nargs:
+ optional_args = 'NULL'
+ else:
+ optional_args = "&%s" % self.opt_arg_struct
+ arg_list_code.append(optional_args)
+
+ for actual_arg in self.args[len(formal_args):]:
+ arg_list_code.append(actual_arg.result())
+
+ result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
+ return result
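+
+ # Added illustration (hypothetical cnames): for "cdef int f(int a, int b)"
+ # called as f(x, 2), c_call_code() renders roughly
+ #
+ # __pyx_f_3mod_f(__pyx_v_x, 2)
+ #
+ # with a flag argument appended for overridable (cpdef) functions and
+ # "&<opt_arg_struct>" appended when optional C arguments are in play.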
+
+ def is_c_result_required(self):
+ func_type = self.function_type()
+ if not func_type.exception_value or func_type.exception_check == '+':
+ return False # skip allocation of unused result temp
+ return True
+
+ def generate_evaluation_code(self, code):
+ function = self.function
+ if function.is_name or function.is_attribute:
+ code.globalstate.use_entry_utility_code(function.entry)
+
+ abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong')
+ is_signed_int = self.type.is_int and self.type.signed
+ if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Common", "Overflow.c"))
+ code.putln('if (unlikely(%s == __PYX_MIN(%s))) {\
+ PyErr_SetString(PyExc_OverflowError,\
+ "Trying to take the absolute value of the most negative integer is not defined."); %s; }' % (
+ self.args[0].result(),
+ self.args[0].type.empty_declaration_code(),
+ code.error_goto(self.pos)))
+
+ if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
+ self.arg_tuple.args and self.arg_tuple.is_literal):
+ super(SimpleCallNode, self).generate_evaluation_code(code)
+ return
+
+ # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
+ arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
+ subexprs = (self.self, self.coerced_self, function, arg)
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ assert self.is_temp
+ self.allocate_temp_result(code)
+
+ if arg is None:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallNoArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
+ self.result(),
+ function.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
+ self.result(),
+ function.py_result(),
+ arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_disposal_code(code)
+ subexpr.free_temps(code)
+
+ def generate_result_code(self, code):
+ func_type = self.function_type()
+ if func_type.is_pyobject:
+ arg_code = self.arg_tuple.py_result()
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ self.function.py_result(),
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ elif func_type.is_cfunction:
+ if self.has_optional_args:
+ actual_nargs = len(self.args)
+ expected_nargs = len(func_type.args) - func_type.optional_arg_count
+ self.opt_arg_struct = code.funcstate.allocate_temp(
+ func_type.op_arg_struct.base_type, manage_ref=True)
+ code.putln("%s.%s = %s;" % (
+ self.opt_arg_struct,
+ Naming.pyrex_prefix + "n",
+ len(self.args) - expected_nargs))
+ args = list(zip(func_type.args, self.args))
+ for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
+ code.putln("%s.%s = %s;" % (
+ self.opt_arg_struct,
+ func_type.opt_arg_cname(formal_arg.name),
+ actual_arg.result_as(formal_arg.type)))
+ exc_checks = []
+ if self.type.is_pyobject and self.is_temp:
+ exc_checks.append("!%s" % self.result())
+ elif self.type.is_memoryviewslice:
+ assert self.is_temp
+ exc_checks.append(self.type.error_condition(self.result()))
+ elif func_type.exception_check != '+':
+ exc_val = func_type.exception_value
+ exc_check = func_type.exception_check
+ if exc_val is not None:
+ exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
+ if exc_check:
+ if self.nogil:
+ exc_checks.append("__Pyx_ErrOccurredWithGIL()")
+ else:
+ exc_checks.append("PyErr_Occurred()")
+ if self.is_temp or exc_checks:
+ rhs = self.c_call_code()
+ if self.result():
+ lhs = "%s = " % self.result()
+ if self.is_temp and self.type.is_pyobject:
+ #return_type = self.type # func_type.return_type
+ #print "SimpleCallNode.generate_result_code: casting", rhs, \
+ # "from", return_type, "to pyobject" ###
+ rhs = typecast(py_object_type, self.type, rhs)
+ else:
+ lhs = ""
+ if func_type.exception_check == '+':
+ translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
+ self.result() if self.type.is_pyobject else None,
+ func_type.exception_value, self.nogil)
+ else:
+ if exc_checks:
+ goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
+ else:
+ goto_error = ""
+ code.putln("%s%s; %s" % (lhs, rhs, goto_error))
+ if self.type.is_pyobject and self.result():
+ code.put_gotref(self.py_result())
+ if self.has_optional_args:
+ code.funcstate.release_temp(self.opt_arg_struct)
+
+
+class NumPyMethodCallNode(ExprNode):
+ # Pythran call to a NumPy function or method.
+ #
+ # function_cname string the function/method to call
+ # arg_tuple TupleNode the arguments as an args tuple
+
+ subexprs = ['arg_tuple']
+ is_temp = True
+ may_return_none = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ code.putln("// function evaluation code for numpy function")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){%s{}(%s)};" % (
+ self.result(),
+ self.result(),
+ self.function_cname,
+ ", ".join(a.pythran_result() for a in args)))
+
+
+class PyMethodCallNode(SimpleCallNode):
+ # Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
+ # Allows the self argument to be injected directly instead of repacking a tuple for it.
+ #
+ # function ExprNode the function/method object to call
+ # arg_tuple TupleNode the arguments for the args tuple
+
+ subexprs = ['function', 'arg_tuple']
+ is_temp = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ self.function.generate_evaluation_code(code)
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ # make sure function is in temp so that we can replace the reference below if it's a method
+ reuse_function_temp = self.function.is_temp
+ if reuse_function_temp:
+ function = self.function.result()
+ else:
+ function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ self.function.make_owned_reference(code)
+ code.put("%s = %s; " % (function, self.function.py_result()))
+ self.function.generate_disposal_code(code)
+ self.function.free_temps(code)
+
+ self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln("%s = NULL;" % self_arg)
+ arg_offset_cname = None
+ if len(args) > 1:
+ arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = 0;" % arg_offset_cname)
+
+ def attribute_is_likely_method(attr):
+ obj = attr.obj
+ if obj.is_name and obj.entry.is_pyglobal:
+ return False # more likely to be a function
+ return True
+
+ if self.function.is_attribute:
+ likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
+ elif self.function.is_name and self.function.cf_state:
+ # not an attribute itself, but might have been assigned from one (e.g. bound method)
+ for assignment in self.function.cf_state:
+ value = assignment.rhs
+ if value and value.is_attribute and value.obj.type and value.obj.type.is_pyobject:
+ if attribute_is_likely_method(value):
+ likely_method = 'likely'
+ break
+ else:
+ likely_method = 'unlikely'
+ else:
+ likely_method = 'unlikely'
+
+ code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function))
+ code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
+ # the following is always true in Py3 (kept only for safety),
+ # but is false for unbound methods in Py2
+ code.putln("if (likely(%s)) {" % self_arg)
+ code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
+ code.put_incref(self_arg, py_object_type)
+ code.put_incref("function", py_object_type)
+ # free method object as early as possible to enable reuse from CPython's freelist
+ code.put_decref_set(function, "function")
+ if len(args) > 1:
+ code.putln("%s = 1;" % arg_offset_cname)
+ code.putln("}")
+ code.putln("}")
+
+ if not args:
+ # fastest special case: try to avoid tuple creation
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = (%s) ? __Pyx_PyObject_CallOneArg(%s, %s) : __Pyx_PyObject_CallNoArg(%s);" % (
+ self.result(), self_arg,
+ function, self_arg,
+ function))
+ code.put_xdecref_clear(self_arg, py_object_type)
+ code.funcstate.release_temp(self_arg)
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ code.put_gotref(self.py_result())
+ elif len(args) == 1:
+ # fastest special case: try to avoid tuple creation
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectCall2Args", "ObjectHandling.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
+ arg = args[0]
+ code.putln(
+ "%s = (%s) ? __Pyx_PyObject_Call2Args(%s, %s, %s) : __Pyx_PyObject_CallOneArg(%s, %s);" % (
+ self.result(), self_arg,
+ function, self_arg, arg.py_result(),
+ function, arg.py_result()))
+ code.put_xdecref_clear(self_arg, py_object_type)
+ code.funcstate.release_temp(self_arg)
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ code.put_gotref(self.py_result())
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyFunctionFastCall", "ObjectHandling.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyCFunctionFastCall", "ObjectHandling.c"))
+ for test_func, call_prefix in [('PyFunction_Check', 'Py'), ('__Pyx_PyFastCFunction_Check', 'PyC')]:
+ code.putln("#if CYTHON_FAST_%sCALL" % call_prefix.upper())
+ code.putln("if (%s(%s)) {" % (test_func, function))
+ code.putln("PyObject *%s[%d] = {%s, %s};" % (
+ Naming.quick_temp_cname,
+ len(args)+1,
+ self_arg,
+ ', '.join(arg.py_result() for arg in args)))
+ code.putln("%s = __Pyx_%sFunction_FastCall(%s, %s+1-%s, %d+%s); %s" % (
+ self.result(),
+ call_prefix,
+ function,
+ Naming.quick_temp_cname,
+ arg_offset_cname,
+ len(args),
+ arg_offset_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_xdecref_clear(self_arg, py_object_type)
+ code.put_gotref(self.py_result())
+ for arg in args:
+ arg.generate_disposal_code(code)
+ code.putln("} else")
+ code.putln("#endif")
+
+ code.putln("{")
+ args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln("%s = PyTuple_New(%d+%s); %s" % (
+ args_tuple, len(args), arg_offset_cname,
+ code.error_goto_if_null(args_tuple, self.pos)))
+ code.put_gotref(args_tuple)
+
+ if len(args) > 1:
+ code.putln("if (%s) {" % self_arg)
+ code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % (
+ self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case
+ code.funcstate.release_temp(self_arg)
+ if len(args) > 1:
+ code.putln("}")
+
+ for i, arg in enumerate(args):
+ arg.make_owned_reference(code)
+ code.put_giveref(arg.py_result())
+ code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
+ args_tuple, i, arg_offset_cname, arg.py_result()))
+ if len(args) > 1:
+ code.funcstate.release_temp(arg_offset_cname)
+
+ for arg in args:
+ arg.generate_post_assignment_code(code)
+ arg.free_temps(code)
+
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ function, args_tuple,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ code.put_decref_clear(args_tuple, py_object_type)
+ code.funcstate.release_temp(args_tuple)
+
+ if len(args) == 1:
+ code.putln("}")
+ code.putln("}") # !CYTHON_FAST_PYCALL
+
+ if reuse_function_temp:
+ self.function.generate_disposal_code(code)
+ self.function.free_temps(code)
+ else:
+ code.put_decref_clear(function, py_object_type)
+ code.funcstate.release_temp(function)
+
+
+class InlinedDefNodeCallNode(CallNode):
+ # Inline call to defnode
+ #
+ # function PyCFunctionNode
+ # function_name NameNode
+ # args [ExprNode]
+
+ subexprs = ['args', 'function_name']
+ is_temp = 1
+ type = py_object_type
+ function = None
+ function_name = None
+
+ def can_be_inlined(self):
+ func_type = self.function.def_node
+ if func_type.star_arg or func_type.starstar_arg:
+ return False
+ if len(func_type.args) != len(self.args):
+ return False
+ if func_type.num_kwonly_args:
+ return False # actually wrong number of arguments
+ return True
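+
+ # Added note: inlining is only attempted for exact positional matches, e.g.
+ #
+ # def f(a, b): ...
+ # f(1, 2) # inlinable: argument count matches, no */** args
+ # f(1) # not inlinable: wrong number of arguments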
+
+ def analyse_types(self, env):
+ self.function_name = self.function_name.analyse_types(env)
+
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+ func_type = self.function.def_node
+ actual_nargs = len(self.args)
+
+ # Coerce arguments
+ some_args_in_temps = False
+ for i in range(actual_nargs):
+ formal_type = func_type.args[i].type
+ arg = self.args[i].coerce_to(formal_type, env)
+ if arg.is_temp:
+ if i > 0:
+ # first argument in temp doesn't impact subsequent arguments
+ some_args_in_temps = True
+ elif arg.type.is_pyobject and not env.nogil:
+ if arg.nonlocally_immutable():
+ # plain local variables are ok
+ pass
+ else:
+ # we do not safely own the argument's reference,
+ # but we must make sure it cannot be collected
+ # before we return from the function, so we create
+ # an owned temp reference to it
+ if i > 0: # first argument doesn't matter
+ some_args_in_temps = True
+ arg = arg.coerce_to_temp(env)
+ self.args[i] = arg
+
+ if some_args_in_temps:
+ # if some args are temps and others are not, they may get
+ # constructed in the wrong order (temps first) => make
+ # sure they are either all temps or all not temps (except
+ # for the last argument, which is evaluated last in any
+ # case)
+ for i in range(actual_nargs-1):
+ arg = self.args[i]
+ if arg.nonlocally_immutable():
+ # locals, C functions, unassignable types are safe.
+ pass
+ elif arg.type.is_cpp_class:
+ # Assignment has side effects, avoid.
+ pass
+ elif env.nogil and arg.type.is_pyobject:
+ # can't copy a Python reference into a temp in nogil
+ # env (this is safe: a construction would fail in
+ # nogil anyway)
+ pass
+ else:
+ #self.args[i] = arg.coerce_to_temp(env)
+ # instead: issue a warning
+ if i > 0:
+ warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
+ break
+ return self
+
+ def generate_result_code(self, code):
+ arg_code = [self.function_name.py_result()]
+ func_type = self.function.def_node
+ for arg, proto_arg in zip(self.args, func_type.args):
+ if arg.type.is_pyobject:
+ arg_code.append(arg.result_as(proto_arg.type))
+ else:
+ arg_code.append(arg.result())
+ arg_code = ', '.join(arg_code)
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ self.function.def_node.entry.pyfunc_cname,
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class PythonCapiFunctionNode(ExprNode):
+ subexprs = []
+
+ def __init__(self, pos, py_name, cname, func_type, utility_code = None):
+ ExprNode.__init__(self, pos, name=py_name, cname=cname,
+ type=func_type, utility_code=utility_code)
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ if self.utility_code:
+ code.globalstate.use_utility_code(self.utility_code)
+
+ def calculate_result_code(self):
+ return self.cname
+
+
+class PythonCapiCallNode(SimpleCallNode):
+ # Python C-API Function call (only created in transforms)
+
+ # By default, we assume that the call never returns None, as this
+ # is true for most C-API functions in CPython. If this does not
+ # apply to a call, set the following to True (or None to inherit
+ # the default behaviour).
+ may_return_none = False
+
+ def __init__(self, pos, function_name, func_type,
+ utility_code = None, py_name=None, **kwargs):
+ self.type = func_type.return_type
+ self.result_ctype = self.type
+ self.function = PythonCapiFunctionNode(
+ pos, py_name, function_name, func_type,
+ utility_code = utility_code)
+ # call this last so that we can override the constructed
+ # attributes above with explicit keyword arguments if required
+ SimpleCallNode.__init__(self, pos, **kwargs)
+
+
+class CachedBuiltinMethodCallNode(CallNode):
+ # Python call to a method of a known Python builtin (only created in transforms)
+
+ subexprs = ['obj', 'args']
+ is_temp = True
+
+ def __init__(self, call_node, obj, method_name, args):
+ super(CachedBuiltinMethodCallNode, self).__init__(
+ call_node.pos,
+ obj=obj, method_name=method_name, args=args,
+ may_return_none=call_node.may_return_none,
+ type=call_node.type)
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ return ExprNode.may_be_none(self)
+
+ def generate_result_code(self, code):
+ type_cname = self.obj.type.cname
+ obj_cname = self.obj.py_result()
+ args = [arg.py_result() for arg in self.args]
+ call_code = code.globalstate.cached_unbound_method_call_code(
+ obj_cname, type_cname, self.method_name, args)
+ code.putln("%s = %s; %s" % (
+ self.result(), call_code,
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ code.put_gotref(self.result())
+
+
+class GeneralCallNode(CallNode):
+ # General Python function call, including keyword,
+ # * and ** arguments.
+ #
+ # function ExprNode
+ # positional_args ExprNode Tuple of positional arguments
+ # keyword_args ExprNode or None Dict of keyword arguments
+
+ type = py_object_type
+
+ subexprs = ['function', 'positional_args', 'keyword_args']
+
+ nogil_check = Node.gil_error
+
+ def compile_time_value(self, denv):
+ function = self.function.compile_time_value(denv)
+ positional_args = self.positional_args.compile_time_value(denv)
+ keyword_args = self.keyword_args.compile_time_value(denv)
+ try:
+ return function(*positional_args, **keyword_args)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def explicit_args_kwds(self):
+ if (self.keyword_args and not self.keyword_args.is_dict_literal or
+ not self.positional_args.is_sequence_constructor):
+ raise CompileError(self.pos,
+ 'Compile-time keyword arguments must be explicit.')
+ return self.positional_args.args, self.keyword_args
+
+ def analyse_types(self, env):
+ if self.analyse_as_type_constructor(env):
+ return self
+ self.function = self.function.analyse_types(env)
+ if not self.function.type.is_pyobject:
+ if self.function.type.is_error:
+ self.type = error_type
+ return self
+ if hasattr(self.function, 'entry'):
+ node = self.map_to_simple_call_node()
+ if node is not None and node is not self:
+ return node.analyse_types(env)
+ elif self.function.entry.as_variable:
+ self.function = self.function.coerce_to_pyobject(env)
+ elif node is self:
+ error(self.pos,
+ "Non-trivial keyword arguments and starred "
+ "arguments not allowed in cdef functions.")
+ else:
+ # error was already reported
+ pass
+ else:
+ self.function = self.function.coerce_to_pyobject(env)
+ if self.keyword_args:
+ self.keyword_args = self.keyword_args.analyse_types(env)
+ self.positional_args = self.positional_args.analyse_types(env)
+ self.positional_args = \
+ self.positional_args.coerce_to_pyobject(env)
+ self.set_py_result_type(self.function)
+ self.is_temp = 1
+ return self
+
+ def map_to_simple_call_node(self):
+ """
+ Tries to map keyword arguments to declared positional arguments.
+ Returns self to try a Python call, None to report an error
+ or a SimpleCallNode if the mapping succeeds.
+ """
+ if not isinstance(self.positional_args, TupleNode):
+ # has starred argument
+ return self
+ if not self.keyword_args.is_dict_literal:
+ # keywords come from arbitrary expression => nothing to do here
+ return self
+ function = self.function
+ entry = getattr(function, 'entry', None)
+ if not entry:
+ return self
+ function_type = entry.type
+ if function_type.is_ptr:
+ function_type = function_type.base_type
+ if not function_type.is_cfunction:
+ return self
+
+ pos_args = self.positional_args.args
+ kwargs = self.keyword_args
+ declared_args = function_type.args
+ if entry.is_cmethod:
+ declared_args = declared_args[1:] # skip 'self'
+
+ if len(pos_args) > len(declared_args):
+ error(self.pos, "function call got too many positional arguments, "
+ "expected %d, got %s" % (len(declared_args),
+ len(pos_args)))
+ return None
+
+ matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
+ if arg.name ])
+ unmatched_args = declared_args[len(pos_args):]
+ matched_kwargs_count = 0
+ args = list(pos_args)
+
+ # check for duplicate keywords
+ seen = set(matched_args)
+ has_errors = False
+ for arg in kwargs.key_value_pairs:
+ name = arg.key.value
+ if name in seen:
+ error(arg.pos, "argument '%s' passed twice" % name)
+ has_errors = True
+ # continue to report more errors if there are any
+ seen.add(name)
+
+ # match keywords that are passed in order
+ for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
+ name = arg.key.value
+ if decl_arg.name == name:
+ matched_args.add(name)
+ matched_kwargs_count += 1
+ args.append(arg.value)
+ else:
+ break
+
+ # match keyword arguments that are passed out-of-order, but keep
+ # the evaluation of non-simple arguments in order by moving them
+ # into temps
+ from .UtilNodes import EvalWithTempExprNode, LetRefNode
+ temps = []
+ if len(kwargs.key_value_pairs) > matched_kwargs_count:
+ unmatched_args = declared_args[len(args):]
+ keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
+ for i, arg in enumerate(kwargs.key_value_pairs) ])
+ first_missing_keyword = None
+ for decl_arg in unmatched_args:
+ name = decl_arg.name
+ if name not in keywords:
+ # missing keyword argument => either done or error
+ if not first_missing_keyword:
+ first_missing_keyword = name
+ continue
+ elif first_missing_keyword:
+ if entry.as_variable:
+ # we might be able to convert the function to a Python
+ # object, which then allows full calling semantics
+ # with default values in gaps - currently, we only
+ # support optional arguments at the end
+ return self
+ # wasn't the last keyword => gaps are not supported
+ error(self.pos, "C function call is missing "
+ "argument '%s'" % first_missing_keyword)
+ return None
+ pos, arg = keywords[name]
+ matched_args.add(name)
+ matched_kwargs_count += 1
+ if arg.value.is_simple():
+ args.append(arg.value)
+ else:
+ temp = LetRefNode(arg.value)
+ assert temp.is_simple()
+ args.append(temp)
+ temps.append((pos, temp))
+
+ if temps:
+ # may have to move preceding non-simple args into temps
+ final_args = []
+ new_temps = []
+ first_temp_arg = temps[0][-1]
+ for arg_value in args:
+ if arg_value is first_temp_arg:
+ break # done
+ if arg_value.is_simple():
+ final_args.append(arg_value)
+ else:
+ temp = LetRefNode(arg_value)
+ new_temps.append(temp)
+ final_args.append(temp)
+ if new_temps:
+ args = final_args
+ temps = new_temps + [ arg for i,arg in sorted(temps) ]
+
+ # check for unexpected keywords
+ for arg in kwargs.key_value_pairs:
+ name = arg.key.value
+ if name not in matched_args:
+ has_errors = True
+ error(arg.pos,
+ "C function got unexpected keyword argument '%s'" %
+ name)
+
+ if has_errors:
+ # error was reported already
+ return None
+
+ # all keywords mapped to positional arguments
+ # if we are missing arguments, SimpleCallNode will figure it out
+ node = SimpleCallNode(self.pos, function=function, args=args)
+ for temp in temps[::-1]:
+ node = EvalWithTempExprNode(temp, node)
+ return node
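+
+ # Added illustration: for "cdef void f(int a, int b, int c)", a call like
+ # f(1, c=g(), b=2) is remapped to positional form; the out-of-order,
+ # non-simple value g() is bound to a LetRefNode temp first so that
+ # non-simple arguments keep their source evaluation order.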
+
+ def generate_result_code(self, code):
+ if self.type.is_error: return
+ if self.keyword_args:
+ kwargs = self.keyword_args.py_result()
+ else:
+ kwargs = 'NULL'
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
+ self.result(),
+ self.function.py_result(),
+ self.positional_args.py_result(),
+ kwargs,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class AsTupleNode(ExprNode):
+ # Convert argument to tuple. Used for normalising
+ # the * argument of a function call.
+ #
+ # arg ExprNode
+
+ subexprs = ['arg']
+ is_temp = 1
+
+ def calculate_constant_result(self):
+ self.constant_result = tuple(self.arg.constant_result)
+
+ def compile_time_value(self, denv):
+ arg = self.arg.compile_time_value(denv)
+ try:
+ return tuple(arg)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
+ if self.arg.type is tuple_type:
+ return self.arg.as_none_safe_node("'NoneType' object is not iterable")
+ self.type = tuple_type
+ return self
+
+ def may_be_none(self):
+ return False
+
+ nogil_check = Node.gil_error
+ gil_message = "Constructing Python tuple"
+
+ def generate_result_code(self, code):
+ cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ cfunc, self.arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
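+
+ # Added note: this node normalises the "*args" of a call, e.g. f(*x) --
+ # if x is already known to be a tuple, analyse_types() above only inserts
+ # a None check; any other object goes through (__Pyx_)PySequence_Tuple here.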
+
+
+class MergedDictNode(ExprNode):
+ # Helper class for keyword arguments and other merged dicts.
+ #
+ # keyword_args [DictNode or other ExprNode]
+
+ subexprs = ['keyword_args']
+ is_temp = 1
+ type = dict_type
+ reject_duplicates = True
+
+ def calculate_constant_result(self):
+ result = {}
+ reject_duplicates = self.reject_duplicates
+ for item in self.keyword_args:
+ if item.is_dict_literal:
+ # process items in order
+ items = ((key.constant_result, value.constant_result)
+ for key, value in item.key_value_pairs)
+ else:
+ items = item.constant_result.iteritems()
+
+ for key, value in items:
+ if reject_duplicates and key in result:
+ raise ValueError("duplicate keyword argument found: %s" % key)
+ result[key] = value
+
+ self.constant_result = result
+
+ def compile_time_value(self, denv):
+ result = {}
+ reject_duplicates = self.reject_duplicates
+ for item in self.keyword_args:
+ if item.is_dict_literal:
+ # process items in order
+ items = [(key.compile_time_value(denv), value.compile_time_value(denv))
+ for key, value in item.key_value_pairs]
+ else:
+ items = item.compile_time_value(denv).iteritems()
+
+ try:
+ for key, value in items:
+ if reject_duplicates and key in result:
+ raise ValueError("duplicate keyword argument found: %s" % key)
+ result[key] = value
+ except Exception as e:
+ self.compile_time_value_error(e)
+ return result
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ return dict_type
+
+ def analyse_types(self, env):
+ self.keyword_args = [
+ arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
+ # FIXME: CPython's error message starts with the runtime function name
+ 'argument after ** must be a mapping, not NoneType')
+ for arg in self.keyword_args
+ ]
+
+ return self
+
+ def may_be_none(self):
+ return False
+
+ gil_message = "Constructing Python dict"
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ args = iter(self.keyword_args)
+ item = next(args)
+ item.generate_evaluation_code(code)
+ if item.type is not dict_type:
+ # CPython supports calling functions with non-dicts, so do we
+ code.putln('if (likely(PyDict_CheckExact(%s))) {' %
+ item.py_result())
+
+ if item.is_dict_literal:
+ item.make_owned_reference(code)
+ code.putln("%s = %s;" % (self.result(), item.py_result()))
+ item.generate_post_assignment_code(code)
+ else:
+ code.putln("%s = PyDict_Copy(%s); %s" % (
+ self.result(),
+ item.py_result(),
+ code.error_goto_if_null(self.result(), item.pos)))
+ code.put_gotref(self.result())
+ item.generate_disposal_code(code)
+
+ if item.type is not dict_type:
+ code.putln('} else {')
+ code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % (
+ self.result(),
+ item.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ item.generate_disposal_code(code)
+ code.putln('}')
+ item.free_temps(code)
+
+ helpers = set()
+ for item in args:
+ if item.is_dict_literal:
+ # inline update instead of creating an intermediate dict
+ for arg in item.key_value_pairs:
+ arg.generate_evaluation_code(code)
+ if self.reject_duplicates:
+ code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
+ self.result(),
+ arg.key.py_result()))
+ helpers.add("RaiseDoubleKeywords")
+ # FIXME: find out function name at runtime!
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
+ arg.key.py_result(),
+ code.error_goto(self.pos)))
+ code.putln("}")
+ code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
+ self.result(),
+ arg.key.py_result(),
+ arg.value.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ else:
+ item.generate_evaluation_code(code)
+ if self.reject_duplicates:
+ # merge mapping into kwdict one by one as we need to check for duplicates
+ helpers.add("MergeKeywords")
+ code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
+ self.result(), item.py_result()))
+ else:
+ # simple case, just add all entries
+ helpers.add("RaiseMappingExpected")
+ code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
+ self.result(), item.py_result()))
+ code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
+ "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
+ code.putln(code.error_goto(item.pos))
+ code.putln("}")
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ for helper in sorted(helpers):
+ code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))
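+
+ # Summary of the code generated above (illustrative): the first item
+ # becomes the result dict -- taken over directly if it is a fresh dict
+ # literal, copied via PyDict_Copy() if it is some other dict, and
+ # converted with dict(...) otherwise. Each following dict literal is
+ # then written into it key by key with PyDict_SetItem(), while any
+ # other mapping is merged wholesale via __Pyx_MergeKeywords() (when
+ # duplicates must be rejected) or PyDict_Update().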
+
+ def annotate(self, code):
+ for item in self.keyword_args:
+ item.annotate(code)
+
+
+class AttributeNode(ExprNode):
+ # obj.attribute
+ #
+ # obj ExprNode
+ # attribute string
+ # needs_none_check boolean Used if obj is an extension type.
+ # If True, obj may be None and a None
+ # check is generated for the access.
+ #
+ # Used internally:
+ #
+ # is_py_attr boolean Is a Python getattr operation
+ # member string C name of struct member
+ # is_called boolean Function call is being done on result
+ # entry Entry Symbol table entry of attribute
+
+ is_attribute = 1
+ subexprs = ['obj']
+
+ type = PyrexTypes.error_type
+ entry = None
+ is_called = 0
+ needs_none_check = True
+ is_memslice_transpose = False
+ is_special_lookup = False
+ is_py_attr = 0
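+
+ # Depending on what the analyse_*() methods below find, the same source
+ # syntax compiles very differently. A rough sketch (names simplified):
+ #
+ # mod.func -> NameNode for a cimported C function
+ # sval.field -> sval.field / ptr->field (C struct access)
+ # ext.cdef_meth() -> call through the vtable pointer of ext
+ # obj.attr -> __Pyx_PyObject_GetAttrStr(obj, "attr")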
+
+ def as_cython_attribute(self):
+ if (isinstance(self.obj, NameNode) and
+ self.obj.is_cython_module and
+ self.attribute != u"parallel"):
+ return self.attribute
+
+ cy = self.obj.as_cython_attribute()
+ if cy:
+ return "%s.%s" % (cy, self.attribute)
+ return None
+
+ def coerce_to(self, dst_type, env):
+ # If coercing to a generic pyobject and this is a cpdef function,
+ # we can use the corresponding Python function object instead.
+ if dst_type is py_object_type:
+ entry = self.entry
+ if entry and entry.is_cfunction and entry.as_variable:
+ # must be a cpdef function
+ self.is_temp = 1
+ self.entry = entry.as_variable
+ self.analyse_as_python_attribute(env)
+ return self
+ return ExprNode.coerce_to(self, dst_type, env)
+
+ def calculate_constant_result(self):
+ attr = self.attribute
+ if attr.startswith("__") and attr.endswith("__"):
+ return
+ self.constant_result = getattr(self.obj.constant_result, attr)
+
+ def compile_time_value(self, denv):
+ attr = self.attribute
+ if attr.startswith("__") and attr.endswith("__"):
+ error(self.pos,
+ "Invalid attribute name '%s' in compile-time expression" % attr)
+ return None
+ obj = self.obj.compile_time_value(denv)
+ try:
+ return getattr(obj, attr)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def type_dependencies(self, env):
+ return self.obj.type_dependencies(env)
+
+ def infer_type(self, env):
+ # FIXME: this is way too redundant with analyse_types()
+ node = self.analyse_as_cimported_attribute_node(env, target=False)
+ if node is not None:
+ if node.entry.type and node.entry.type.is_cfunction:
+ # special-case - function converted to pointer
+ return PyrexTypes.CPtrType(node.entry.type)
+ else:
+ return node.entry.type
+ node = self.analyse_as_type_attribute(env)
+ if node is not None:
+ return node.entry.type
+ obj_type = self.obj.infer_type(env)
+ self.analyse_attribute(env, obj_type=obj_type)
+ if obj_type.is_builtin_type and self.type.is_cfunction:
+ # special case: C-API replacements for C methods of
+ # builtin types cannot be inferred as C functions as
+ # that would prevent their use as bound methods
+ return py_object_type
+ elif self.entry and self.entry.is_cmethod:
+ # special case: bound methods should not be inferred
+ # as their unbound method types
+ return py_object_type
+ return self.type
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def analyse_target_types(self, env):
+ node = self.analyse_types(env, target = 1)
+ if node.type.is_const:
+ error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
+ if not node.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
+ return node
+
+ def analyse_types(self, env, target = 0):
+ self.initialized_check = env.directives['initializedcheck']
+ node = self.analyse_as_cimported_attribute_node(env, target)
+ if node is None and not target:
+ node = self.analyse_as_type_attribute(env)
+ if node is None:
+ node = self.analyse_as_ordinary_attribute_node(env, target)
+ assert node is not None
+ if node.entry:
+ node.entry.used = True
+ if node.is_attribute:
+ node.wrap_obj_in_nonecheck(env)
+ return node
+
+ def analyse_as_cimported_attribute_node(self, env, target):
+ # Try to interpret this as a reference to an imported
+ # C const, type, var or function. If successful, returns
+ # a NameNode created from this node; otherwise returns
+ # None (or self, after reporting an error for a missing
+ # attribute of a cimported module).
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and (
+ entry.is_cglobal or entry.is_cfunction
+ or entry.is_type or entry.is_const):
+ return self.as_name_node(env, entry, target)
+ if self.is_cimported_module_without_shadow(env):
+ error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
+ return self
+ return None
+
+ def analyse_as_type_attribute(self, env):
+ # Try to interpret this as a reference to an unbound
+ # C method of an extension type or builtin type. If successful,
+ # creates a corresponding NameNode and returns it, otherwise
+ # returns None.
+ if self.obj.is_string_literal:
+ return
+ type = self.obj.analyse_as_type(env)
+ if type:
+ if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
+ entry = type.scope.lookup_here(self.attribute)
+ if entry and (entry.is_cmethod or (type.is_cpp_class and entry.type.is_cfunction)):
+ if type.is_builtin_type:
+ if not self.is_called:
+ # must handle this as Python object
+ return None
+ ubcm_entry = entry
+ else:
+ # Create a temporary entry describing the C method
+ # as an ordinary function.
+ if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
+ cname = entry.func_cname
+ if entry.type.is_static_method or (
+ env.parent_scope and env.parent_scope.is_cpp_class_scope):
+ ctype = entry.type
+ elif type.is_cpp_class:
+ error(self.pos, "%s not a static member of %s" % (entry.name, type))
+ ctype = PyrexTypes.error_type
+ else:
+ # Fix self type.
+ ctype = copy.copy(entry.type)
+ ctype.args = ctype.args[:]
+ ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
+ else:
+ cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
+ ctype = entry.type
+ ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
+ ubcm_entry.is_cfunction = 1
+ ubcm_entry.func_cname = entry.func_cname
+ ubcm_entry.is_unbound_cmethod = 1
+ ubcm_entry.scope = entry.scope
+ return self.as_name_node(env, ubcm_entry, target=False)
+ elif type.is_enum:
+ if self.attribute in type.values:
+ for entry in type.entry.enum_values:
+ if entry.name == self.attribute:
+ return self.as_name_node(env, entry, target=False)
+ else:
+ error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+ else:
+ error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+ return None
+
+ def analyse_as_type(self, env):
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ return module_scope.lookup_type(self.attribute)
+ if not self.obj.is_string_literal:
+ base_type = self.obj.analyse_as_type(env)
+ if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
+ return base_type.scope.lookup_type(self.attribute)
+ return None
+
+ def analyse_as_extension_type(self, env):
+ # Try to interpret this as a reference to an extension type
+ # in a cimported module. Returns the extension type, or None.
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and entry.is_type:
+ if entry.type.is_extension_type or entry.type.is_builtin_type:
+ return entry.type
+ return None
+
+ def analyse_as_module(self, env):
+ # Try to interpret this as a reference to a cimported module
+ # in another cimported module. Returns the module scope, or None.
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and entry.as_module:
+ return entry.as_module
+ return None
+
+ def as_name_node(self, env, entry, target):
+ # Create a corresponding NameNode from this node and complete the
+ # analyse_types phase.
+ node = NameNode.from_node(self, name=self.attribute, entry=entry)
+ if target:
+ node = node.analyse_target_types(env)
+ else:
+ node = node.analyse_rvalue_entry(env)
+ node.entry.used = 1
+ return node
+
+ def analyse_as_ordinary_attribute_node(self, env, target):
+ self.obj = self.obj.analyse_types(env)
+ self.analyse_attribute(env)
+ if self.entry and self.entry.is_cmethod and not self.is_called:
+# error(self.pos, "C method can only be called")
+ pass
+ ## Reference to C array turns into pointer to first element.
+ #while self.type.is_array:
+ # self.type = self.type.element_ptr_type()
+ if self.is_py_attr:
+ if not target:
+ self.is_temp = 1
+ self.result_ctype = py_object_type
+ elif target and self.obj.type.is_builtin_type:
+ error(self.pos, "Assignment to an immutable object field")
+ #elif self.type.is_memoryviewslice and not target:
+ # self.is_temp = True
+ return self
+
+ def analyse_attribute(self, env, obj_type = None):
+ # Look up attribute and set self.type and self.member.
+ immutable_obj = obj_type is not None # used during type inference
+ self.is_py_attr = 0
+ self.member = self.attribute
+ if obj_type is None:
+ if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ obj_type = self.obj.type
+ else:
+ if obj_type.is_string or obj_type.is_pyunicode_ptr:
+ obj_type = py_object_type
+ if obj_type.is_ptr or obj_type.is_array:
+ obj_type = obj_type.base_type
+ self.op = "->"
+ elif obj_type.is_extension_type or obj_type.is_builtin_type:
+ self.op = "->"
+ elif obj_type.is_reference and obj_type.is_fake_reference:
+ self.op = "->"
+ else:
+ self.op = "."
+ if obj_type.has_attributes:
+ if obj_type.attributes_known():
+ entry = obj_type.scope.lookup_here(self.attribute)
+ if obj_type.is_memoryviewslice and not entry:
+ if self.attribute == 'T':
+ self.is_memslice_transpose = True
+ self.is_temp = True
+ self.use_managed_ref = True
+ self.type = self.obj.type.transpose(self.pos)
+ return
+ else:
+ obj_type.declare_attribute(self.attribute, env, self.pos)
+ entry = obj_type.scope.lookup_here(self.attribute)
+ if entry and entry.is_member:
+ entry = None
+ else:
+ error(self.pos,
+ "Cannot select attribute of incomplete type '%s'"
+ % obj_type)
+ self.type = PyrexTypes.error_type
+ return
+ self.entry = entry
+ if entry:
+ if obj_type.is_extension_type and entry.name == "__weakref__":
+ error(self.pos, "Illegal use of special attribute __weakref__")
+
+ # def methods need the normal attribute lookup
+ # because they do not have struct entries
+ # fused functions go through assignment synthesis
+ # (foo = pycfunction(foo_func_obj)) and need to go through
+ # regular Python lookup as well
+ if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
+ self.type = entry.type
+ self.member = entry.cname
+ return
+ else:
+ # If it's not a variable or C method, it must be a Python
+ # method of an extension type, so we treat it like a Python
+ # attribute.
+ pass
+ # If we get here, the base object is not a struct/union/extension
+ # type, or it is an extension type and the attribute is either not
+ # declared or is declared as a Python method. Treat it as a Python
+ # attribute reference.
+ self.analyse_as_python_attribute(env, obj_type, immutable_obj)
+
+ def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
+ if obj_type is None:
+ obj_type = self.obj.type
+ # mangle private '__*' Python attributes used inside of a class
+ self.attribute = env.mangle_class_private_name(self.attribute)
+ self.member = self.attribute
+ self.type = py_object_type
+ self.is_py_attr = 1
+
+ if not obj_type.is_pyobject and not obj_type.is_error:
+ # Expose python methods for immutable objects.
+ if (obj_type.is_string or obj_type.is_cpp_string
+ or obj_type.is_buffer or obj_type.is_memoryviewslice
+ or obj_type.is_numeric
+ or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
+ or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
+ if not immutable_obj:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
+ and self.obj.entry.as_variable
+ and self.obj.entry.as_variable.type.is_pyobject):
+ # might be an optimised builtin function => unpack it
+ if not immutable_obj:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ else:
+ error(self.pos,
+ "Object of type '%s' has no attribute '%s'" %
+ (obj_type, self.attribute))
+
+ def wrap_obj_in_nonecheck(self, env):
+ if not env.directives['nonecheck']:
+ return
+
+ msg = None
+ format_args = ()
+ if (self.obj.type.is_extension_type and self.needs_none_check and not
+ self.is_py_attr):
+ msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
+ format_args = (self.attribute,)
+ elif self.obj.type.is_memoryviewslice:
+ if self.is_memslice_transpose:
+ msg = "Cannot transpose None memoryview slice"
+ else:
+ entry = self.obj.type.scope.lookup_here(self.attribute)
+ if entry:
+ # copy/is_c_contig/shape/strides etc
+ msg = "Cannot access '%s' attribute of None memoryview slice"
+ format_args = (entry.name,)
+
+ if msg:
+ self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
+ format_args=format_args)
+
+ def nogil_check(self, env):
+ if self.is_py_attr:
+ self.gil_error()
+
+ gil_message = "Accessing Python attribute"
+
+ def is_cimported_module_without_shadow(self, env):
+ return self.obj.is_cimported_module_without_shadow(env)
+
+ def is_simple(self):
+ if self.obj:
+ return self.result_in_temp() or self.obj.is_simple()
+ else:
+ return NameNode.is_simple(self)
+
+ def is_lvalue(self):
+ if self.obj:
+ return True
+ else:
+ return NameNode.is_lvalue(self)
+
+ def is_ephemeral(self):
+ if self.obj:
+ return self.obj.is_ephemeral()
+ else:
+ return NameNode.is_ephemeral(self)
+
+ def calculate_result_code(self):
+ #print "AttributeNode.calculate_result_code:", self.member ###
+ #print "...obj node =", self.obj, "code", self.obj.result() ###
+ #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
+ obj = self.obj
+ obj_code = obj.result_as(obj.type)
+ #print "...obj_code =", obj_code ###
+ if self.entry and self.entry.is_cmethod:
+ if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
+ if self.entry.final_func_cname:
+ return self.entry.final_func_cname
+
+ if self.type.from_fused:
+ # If the attribute was specialized through indexing, make
+ # sure to get the right fused name, as our entry was
+ # replaced by our parent index node
+ # (AnalyseExpressionsTransform)
+ self.member = self.entry.cname
+
+ return "((struct %s *)%s%s%s)->%s" % (
+ obj.type.vtabstruct_cname, obj_code, self.op,
+ obj.type.vtabslot_cname, self.member)
+ elif self.result_is_used:
+ return self.member
+ # Generating no code at all for unused access to optimised builtin
+ # methods fixes the problem that some optimisations only exist as
+ # macros, i.e. there is no function pointer to them, so we would
+ # generate invalid C code here.
+ return
+ elif obj.type.is_complex:
+ return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
+ else:
+ if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
+ # accessing a field of a builtin type, need to cast better than result_as() does
+ obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
+ return "%s%s%s" % (obj_code, self.op, self.member)
+
+ def generate_result_code(self, code):
+ if self.is_py_attr:
+ if self.is_special_lookup:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
+ lookup_func_name = '__Pyx_PyObject_LookupSpecial'
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
+ lookup_func_name = '__Pyx_PyObject_GetAttrStr'
+ code.putln(
+ '%s = %s(%s, %s); %s' % (
+ self.result(),
+ lookup_func_name,
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ elif self.type.is_memoryviewslice:
+ if self.is_memslice_transpose:
+ # transpose the slice
+ for access, packing in self.type.axes:
+ if access == 'ptr':
+ error(self.pos, "Transposing not supported for slices "
+ "with indirect dimensions")
+ return
+
+ code.putln("%s = %s;" % (self.result(), self.obj.result()))
+ code.put_incref_memoryviewslice(self.result(), have_gil=True)
+
+ T = "__pyx_memslice_transpose(&%s) == 0"
+ code.putln(code.error_goto_if(T % self.result(), self.pos))
+ elif self.initialized_check:
+ code.putln(
+ 'if (unlikely(!%s.memview)) {'
+ 'PyErr_SetString(PyExc_AttributeError,'
+ '"Memoryview is not initialized");'
+ '%s'
+ '}' % (self.result(), code.error_goto(self.pos)))
+ else:
+ # result_code contains what is needed, but we may need to insert
+ # a check and raise an exception
+ if self.obj.type and self.obj.type.is_extension_type:
+ pass
+ elif self.entry and self.entry.is_cmethod:
+ # C method implemented as function call with utility code
+ code.globalstate.use_entry_utility_code(self.entry)
+
+ def generate_disposal_code(self, code):
+ if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
+ # mirror condition for putting the memview incref here:
+ code.put_xdecref_memoryviewslice(
+ self.result(), have_gil=True)
+ code.putln("%s.memview = NULL;" % self.result())
+ code.putln("%s.data = NULL;" % self.result())
+ else:
+ ExprNode.generate_disposal_code(self, code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.obj.generate_evaluation_code(code)
+ if self.is_py_attr:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos,
+ '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute),
+ rhs.py_result()))
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ elif self.obj.type.is_complex:
+ code.putln("__Pyx_SET_C%s(%s, %s);" % (
+ self.member.upper(),
+ self.obj.result_as(self.obj.type),
+ rhs.result_as(self.ctype())))
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ else:
+ select_code = self.result()
+ if self.type.is_pyobject and self.use_managed_ref:
+ rhs.make_owned_reference(code)
+ code.put_giveref(rhs.py_result())
+ code.put_gotref(select_code)
+ code.put_decref(select_code, self.ctype())
+ elif self.type.is_memoryviewslice:
+ from . import MemoryView
+ MemoryView.put_assign_to_memviewslice(
+ select_code, rhs, rhs.result(), self.type, code)
+
+ if not self.type.is_memoryviewslice:
+ code.putln(
+ "%s = %s;" % (
+ select_code,
+ rhs.result_as(self.ctype())))
+ #rhs.result()))
+ rhs.generate_post_assignment_code(code)
+ rhs.free_temps(code)
+ self.obj.generate_disposal_code(code)
+ self.obj.free_temps(code)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ self.obj.generate_evaluation_code(code)
+ if self.is_py_attr or (self.entry.scope.is_property_scope
+ and u'__del__' in self.entry.scope.entries):
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos,
+ '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute)))
+ else:
+ error(self.pos, "Cannot delete C attribute of extension type")
+ self.obj.generate_disposal_code(code)
+ self.obj.free_temps(code)
+
+ def annotate(self, code):
+ if self.is_py_attr:
+ style, text = 'py_attr', 'python attribute (%s)'
+ else:
+ style, text = 'c_attr', 'c attribute (%s)'
+ code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
+
+
+#-------------------------------------------------------------------
+#
+# Constructor nodes
+#
+#-------------------------------------------------------------------
+
+class StarredUnpackingNode(ExprNode):
+ # A starred expression like "*a"
+ #
+ # This is only allowed in sequence assignment or construction such as
+ #
+ # a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
+ #
+ # and will be special cased during type analysis (or generate an error
+ # if it's found at unexpected places).
+ #
+ # target ExprNode
+
+ subexprs = ['target']
+ is_starred = 1
+ type = py_object_type
+ is_temp = 1
+ starred_expr_allowed_here = False
+
+ def __init__(self, pos, target):
+ ExprNode.__init__(self, pos, target=target)
+
+ def analyse_declarations(self, env):
+ if not self.starred_expr_allowed_here:
+ error(self.pos, "starred expression is not allowed here")
+ self.target.analyse_declarations(env)
+
+ def infer_type(self, env):
+ return self.target.infer_type(env)
+
+ def analyse_types(self, env):
+ if not self.starred_expr_allowed_here:
+ error(self.pos, "starred expression is not allowed here")
+ self.target = self.target.analyse_types(env)
+ self.type = self.target.type
+ return self
+
+ def analyse_target_declaration(self, env):
+ self.target.analyse_target_declaration(env)
+
+ def analyse_target_types(self, env):
+ self.target = self.target.analyse_target_types(env)
+ self.type = self.target.type
+ return self
+
+ def calculate_result_code(self):
+ return ""
+
+ def generate_result_code(self, code):
+ pass
+
+
+class SequenceNode(ExprNode):
+ # Base class for list and tuple constructor nodes.
+ # Contains common code for performing sequence unpacking.
+ #
+ # args [ExprNode]
+ # unpacked_items [ExprNode] or None
+ # coerced_unpacked_items [ExprNode] or None
+ # mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
+
+ subexprs = ['args', 'mult_factor']
+
+ is_sequence_constructor = 1
+ unpacked_items = None
+ mult_factor = None
+ slow = False # trade speed for code size (e.g. use PyTuple_Pack())
+
+ def compile_time_value_list(self, denv):
+ return [arg.compile_time_value(denv) for arg in self.args]
+
+ def replace_starred_target_node(self):
+ # replace a starred node in the targets by the contained expression
+ self.starred_assignment = False
+ args = []
+ for arg in self.args:
+ if arg.is_starred:
+ if self.starred_assignment:
+ error(arg.pos, "more than 1 starred expression in assignment")
+ self.starred_assignment = True
+ arg = arg.target
+ arg.is_starred = True
+ args.append(arg)
+ self.args = args
+
+ def analyse_target_declaration(self, env):
+ self.replace_starred_target_node()
+ for arg in self.args:
+ arg.analyse_target_declaration(env)
+
+ def analyse_types(self, env, skip_children=False):
+ for i, arg in enumerate(self.args):
+ if not skip_children:
+ arg = arg.analyse_types(env)
+ self.args[i] = arg.coerce_to_pyobject(env)
+ if self.mult_factor:
+ self.mult_factor = self.mult_factor.analyse_types(env)
+ if not self.mult_factor.type.is_int:
+ self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
+ self.is_temp = 1
+ # not setting self.type here, subtypes do this
+ return self
+
+ def coerce_to_ctuple(self, dst_type, env):
+ if self.type == dst_type:
+ return self
+ assert not self.mult_factor
+ if len(self.args) != dst_type.size:
+ error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
+ dst_type.size, len(self.args)))
+ coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
+ return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
+
+ def _create_merge_node_if_necessary(self, env):
+ self._flatten_starred_args()
+ if not any(arg.is_starred for arg in self.args):
+ return self
+ # convert into MergedSequenceNode by building partial sequences
+ args = []
+ values = []
+ for arg in self.args:
+ if arg.is_starred:
+ if values:
+ args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
+ values = []
+ args.append(arg.target)
+ else:
+ values.append(arg)
+ if values:
+ args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
+ node = MergedSequenceNode(self.pos, args, self.type)
+ if self.mult_factor:
+ node = binop_node(
+ self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
+ inplace=True, type=self.type, is_temp=True)
+ return node
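+
+ # Illustrative example of the transformation above: the literal
+ #
+ # [1, 2, *a, 3]
+ #
+ # becomes MergedSequenceNode([TupleNode(1, 2), a, TupleNode(3)]):
+ # consecutive plain items are packed into partial tuples and each
+ # starred iterable is passed through as-is.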
+
+ def _flatten_starred_args(self):
+ args = []
+ for arg in self.args:
+ if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
+ args.extend(arg.target.args)
+ else:
+ args.append(arg)
+ self.args[:] = args
+
+ def may_be_none(self):
+ return False
+
+ def analyse_target_types(self, env):
+ if self.mult_factor:
+ error(self.pos, "can't assign to multiplied sequence")
+ self.unpacked_items = []
+ self.coerced_unpacked_items = []
+ self.any_coerced_items = False
+ for i, arg in enumerate(self.args):
+ arg = self.args[i] = arg.analyse_target_types(env)
+ if arg.is_starred:
+ if not arg.type.assignable_from(list_type):
+ error(arg.pos,
+ "starred target must have Python object (list) type")
+ if arg.type is py_object_type:
+ arg.type = list_type
+ unpacked_item = PyTempNode(self.pos, env)
+ coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
+ if unpacked_item is not coerced_unpacked_item:
+ self.any_coerced_items = True
+ self.unpacked_items.append(unpacked_item)
+ self.coerced_unpacked_items.append(coerced_unpacked_item)
+ self.type = py_object_type
+ return self
+
+ def generate_result_code(self, code):
+ self.generate_operation_code(code)
+
+ def generate_sequence_packing_code(self, code, target=None, plain=False):
+ if target is None:
+ target = self.result()
+ size_factor = c_mult = ''
+ mult_factor = None
+
+ if self.mult_factor and not plain:
+ mult_factor = self.mult_factor
+ if mult_factor.type.is_int:
+ c_mult = mult_factor.result()
+ if (isinstance(mult_factor.constant_result, _py_int_types) and
+ mult_factor.constant_result > 0):
+ size_factor = ' * %s' % mult_factor.constant_result
+ elif mult_factor.type.signed:
+ size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
+ else:
+ size_factor = ' * (%s)' % (c_mult,)
+
+ if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
+ # use PyTuple_Pack() to avoid generating huge amounts of one-time code
+ code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
+ target,
+ len(self.args),
+ ', '.join(arg.py_result() for arg in self.args),
+ code.error_goto_if_null(target, self.pos)))
+ code.put_gotref(target)
+ elif self.type.is_ctuple:
+ for i, arg in enumerate(self.args):
+ code.putln("%s.f%s = %s;" % (
+ target, i, arg.result()))
+ else:
+ # build the tuple/list step by step, potentially multiplying it as we go
+ if self.type is list_type:
+ create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
+ elif self.type is tuple_type:
+ create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
+ else:
+ raise InternalError("sequence packing for unexpected type %s" % self.type)
+ arg_count = len(self.args)
+ code.putln("%s = %s(%s%s); %s" % (
+ target, create_func, arg_count, size_factor,
+ code.error_goto_if_null(target, self.pos)))
+ code.put_gotref(target)
+
+ if c_mult:
+ # FIXME: can't use a temp variable here as the code may
+ # end up in the constant building function. Temps
+ # currently don't work there.
+
+ #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
+ counter = Naming.quick_temp_cname
+ code.putln('{ Py_ssize_t %s;' % counter)
+ if arg_count == 1:
+ offset = counter
+ else:
+ offset = '%s * %s' % (counter, arg_count)
+ code.putln('for (%s=0; %s < %s; %s++) {' % (
+ counter, counter, c_mult, counter
+ ))
+ else:
+ offset = ''
+
+ for i in range(arg_count):
+ arg = self.args[i]
+ if c_mult or not arg.result_in_temp():
+ code.put_incref(arg.result(), arg.ctype())
+ code.put_giveref(arg.py_result())
+ code.putln("%s(%s, %s, %s);" % (
+ set_item_func,
+ target,
+ (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
+ arg.py_result()))
+
+ if c_mult:
+ code.putln('}')
+ #code.funcstate.release_temp(counter)
+ code.putln('}')
+
+ if mult_factor is not None and mult_factor.type.is_pyobject:
+ code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
+ Naming.quick_temp_cname, target, mult_factor.py_result(),
+ code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
+ ))
+ code.put_gotref(Naming.quick_temp_cname)
+ code.put_decref(target, py_object_type)
+ code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
+ code.putln('}')
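+
+ # Rough shape of the C code emitted above for a multiplied list such as
+ # "[a, b] * n" with a signed C integer n (illustrative, names simplified):
+ #
+ # result = PyList_New(2 * ((n < 0) ? 0 : n));
+ # { Py_ssize_t i;
+ # for (i = 0; i < n; i++) {
+ # Py_INCREF(a); PyList_SET_ITEM(result, i * 2, a);
+ # Py_INCREF(b); PyList_SET_ITEM(result, i * 2 + 1, b);
+ # } }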
+
+ def generate_subexpr_disposal_code(self, code):
+ if self.mult_factor and self.mult_factor.type.is_int:
+ super(SequenceNode, self).generate_subexpr_disposal_code(code)
+ elif self.type is tuple_type and (self.is_literal or self.slow):
+ super(SequenceNode, self).generate_subexpr_disposal_code(code)
+ else:
+ # We call generate_post_assignment_code here instead
+ # of generate_disposal_code, because values were stored
+ # in the tuple using a reference-stealing operation.
+ for arg in self.args:
+ arg.generate_post_assignment_code(code)
+ # Should NOT call free_temps -- this is invoked by the default
+ # generate_evaluation_code which will do that.
+ if self.mult_factor:
+ self.mult_factor.generate_disposal_code(code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ if self.starred_assignment:
+ self.generate_starred_assignment_code(rhs, code)
+ else:
+ self.generate_parallel_assignment_code(rhs, code)
+
+ for item in self.unpacked_items:
+ item.release(code)
+ rhs.free_temps(code)
+
+ _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
+ ]))
+
+ def generate_parallel_assignment_code(self, rhs, code):
+ # Need to work around the fact that generate_evaluation_code
+ # allocates the temps in a rather hacky way -- the assignment
+ # is evaluated twice, within each if-block.
+ for item in self.unpacked_items:
+ item.allocate(code)
+ special_unpack = (rhs.type is py_object_type
+ or rhs.type in (tuple_type, list_type)
+ or not rhs.type.is_builtin_type)
+ long_enough_for_a_loop = len(self.unpacked_items) > 3
+
+ if special_unpack:
+ self.generate_special_parallel_unpacking_code(
+ code, rhs, use_loop=long_enough_for_a_loop)
+ else:
+ code.putln("{")
+ self.generate_generic_parallel_unpacking_code(
+ code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
+ code.putln("}")
+
+ for value_node in self.coerced_unpacked_items:
+ value_node.generate_evaluation_code(code)
+ for i in range(len(self.args)):
+ self.args[i].generate_assignment_code(
+ self.coerced_unpacked_items[i], code)
+
+ def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
+ sequence_type_test = '1'
+ none_check = "likely(%s != Py_None)" % rhs.py_result()
+ if rhs.type is list_type:
+ sequence_types = ['List']
+ if rhs.may_be_none():
+ sequence_type_test = none_check
+ elif rhs.type is tuple_type:
+ sequence_types = ['Tuple']
+ if rhs.may_be_none():
+ sequence_type_test = none_check
+ else:
+ sequence_types = ['Tuple', 'List']
+ tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
+ list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
+ sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
+
+ code.putln("if (%s) {" % sequence_type_test)
+ code.putln("PyObject* sequence = %s;" % rhs.py_result())
+
+ # list/tuple => check size
+ code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
+ code.putln("if (unlikely(size != %d)) {" % len(self.args))
+ code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
+ code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
+ len(self.args), len(self.args)))
+ code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
+ code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
+ # < 0 => exception
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
+ # unpack items from list/tuple in unrolled loop (can't fail)
+ if len(sequence_types) == 2:
+ code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
+ item.result(), sequence_types[0], i))
+ if len(sequence_types) == 2:
+ code.putln("} else {")
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
+ item.result(), sequence_types[1], i))
+ code.putln("}")
+ for item in self.unpacked_items:
+ code.put_incref(item.result(), item.ctype())
+
+ code.putln("#else")
+ # in non-CPython, use the PySequence protocol (which can fail)
+ if not use_loop:
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
+ item.result(), i,
+ code.error_goto_if_null(item.result(), self.pos)))
+ code.put_gotref(item.result())
+ else:
+ code.putln("{")
+ code.putln("Py_ssize_t i;")
+ code.putln("PyObject** temps[%s] = {%s};" % (
+ len(self.unpacked_items),
+ ','.join(['&%s' % item.result() for item in self.unpacked_items])))
+ code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
+ code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
+ code.error_goto_if_null('item', self.pos)))
+ code.put_gotref('item')
+ code.putln("*(temps[i]) = item;")
+ code.putln("}")
+ code.putln("}")
+
+ code.putln("#endif")
+ rhs.generate_disposal_code(code)
+
+ if sequence_type_test == '1':
+ code.putln("}") # all done
+ elif sequence_type_test == none_check:
+ # either tuple/list or None => save some code by generating the error directly
+ code.putln("} else {")
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
+ code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
+ code.putln("}") # all done
+ else:
+ code.putln("} else {") # needs iteration fallback code
+ self.generate_generic_parallel_unpacking_code(
+ code, rhs, self.unpacked_items, use_loop=use_loop)
+ code.putln("}")
+
+ def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
+ code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
+ code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
+
+ if use_loop:
+ code.putln("PyObject** temps[%s] = {%s};" % (
+ len(self.unpacked_items),
+ ','.join(['&%s' % item.result() for item in unpacked_items])))
+
+ iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln(
+ "%s = PyObject_GetIter(%s); %s" % (
+ iterator_temp,
+ rhs.py_result(),
+ code.error_goto_if_null(iterator_temp, self.pos)))
+ code.put_gotref(iterator_temp)
+ rhs.generate_disposal_code(code)
+
+ iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
+ code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
+ iternext_func, iterator_temp))
+
+ unpacking_error_label = code.new_label('unpacking_failed')
+ unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
+ if use_loop:
+ code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
+ code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
+ code.put_goto(unpacking_error_label)
+ code.put_gotref("item")
+ code.putln("*(temps[index]) = item;")
+ code.putln("}")
+ else:
+ for i, item in enumerate(unpacked_items):
+ code.put(
+ "index = %d; %s = %s; if (unlikely(!%s)) " % (
+ i,
+ item.result(),
+ unpack_code,
+ item.result()))
+ code.put_goto(unpacking_error_label)
+ code.put_gotref(item.py_result())
+
+ if terminate:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
+ unpack_code,
+ len(unpacked_items)))
+ code.putln("%s = NULL;" % iternext_func)
+ code.put_decref_clear(iterator_temp, py_object_type)
+
+ unpacking_done_label = code.new_label('unpacking_done')
+ code.put_goto(unpacking_done_label)
+
+ code.put_label(unpacking_error_label)
+ code.put_decref_clear(iterator_temp, py_object_type)
+ code.putln("%s = NULL;" % iternext_func)
+ code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
+ code.putln(code.error_goto(self.pos))
+ code.put_label(unpacking_done_label)
+
+ code.funcstate.release_temp(iternext_func)
+ if terminate:
+ code.funcstate.release_temp(iterator_temp)
+ iterator_temp = None
+
+ return iterator_temp
+
+ def generate_starred_assignment_code(self, rhs, code):
+ for i, arg in enumerate(self.args):
+ if arg.is_starred:
+ starred_target = self.unpacked_items[i]
+ unpacked_fixed_items_left = self.unpacked_items[:i]
+ unpacked_fixed_items_right = self.unpacked_items[i+1:]
+ break
+ else:
+ assert False
+
+ iterator_temp = None
+ if unpacked_fixed_items_left:
+ for item in unpacked_fixed_items_left:
+ item.allocate(code)
+ code.putln('{')
+ iterator_temp = self.generate_generic_parallel_unpacking_code(
+ code, rhs, unpacked_fixed_items_left,
+ use_loop=True, terminate=False)
+ for i, item in enumerate(unpacked_fixed_items_left):
+ value_node = self.coerced_unpacked_items[i]
+ value_node.generate_evaluation_code(code)
+ code.putln('}')
+
+ starred_target.allocate(code)
+ target_list = starred_target.result()
+ code.putln("%s = PySequence_List(%s); %s" % (
+ target_list,
+ iterator_temp or rhs.py_result(),
+ code.error_goto_if_null(target_list, self.pos)))
+ code.put_gotref(target_list)
+
+ if iterator_temp:
+ code.put_decref_clear(iterator_temp, py_object_type)
+ code.funcstate.release_temp(iterator_temp)
+ else:
+ rhs.generate_disposal_code(code)
+
+ if unpacked_fixed_items_right:
+ code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
+ length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
+ code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
+ code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
+ len(unpacked_fixed_items_left), length_temp,
+ code.error_goto(self.pos)))
+ code.putln('}')
+
+ for item in unpacked_fixed_items_right[::-1]:
+ item.allocate(code)
+ for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
+ self.coerced_unpacked_items[::-1])):
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
+ item.py_result(), target_list, length_temp, i+1))
+ # resize the list the hard way
+ code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
+ code.putln('#else')
+ code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
+ item.py_result(), target_list, length_temp, i+1))
+ code.putln('#endif')
+ code.put_gotref(item.py_result())
+ coerced_arg.generate_evaluation_code(code)
+
+ code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
+ sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
+ sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
+ code.error_goto_if_null(sublist_temp, self.pos)))
+ code.put_gotref(sublist_temp)
+ code.funcstate.release_temp(length_temp)
+ code.put_decref(target_list, py_object_type)
+ code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
+ code.putln('#else')
+ code.putln('(void)%s;' % sublist_temp) # avoid warning about unused variable
+ code.funcstate.release_temp(sublist_temp)
+ code.putln('#endif')
+
+ for i, arg in enumerate(self.args):
+ arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
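+
+ # Overall strategy of the starred assignment above for "a, *b, c = rhs"
+ # (illustrative):
+ #
+ # 1. unpack 'a' from an iterator over rhs using the generic
+ # unpacking code (terminate=False keeps the iterator alive),
+ # 2. collect everything left in the iterator into the list 'b'
+ # via PySequence_List(),
+ # 3. pop the trailing item for 'c' off the end of that list,
+ # shrinking it in place (or slicing, outside CPython).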
+
+ def annotate(self, code):
+ for arg in self.args:
+ arg.annotate(code)
+ if self.unpacked_items:
+ for arg in self.unpacked_items:
+ arg.annotate(code)
+ for arg in self.coerced_unpacked_items:
+ arg.annotate(code)
+
+
+class TupleNode(SequenceNode):
+ # Tuple constructor.
+
+ type = tuple_type
+ is_partly_literal = False
+
+ gil_message = "Constructing Python tuple"
+
+ def infer_type(self, env):
+ if self.mult_factor or not self.args:
+ return tuple_type
+ arg_types = [arg.infer_type(env) for arg in self.args]
+ if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
+ for type in arg_types):
+ return tuple_type
+ return env.declare_tuple_type(self.pos, arg_types).type
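+
+ # Example of the inference above (illustrative): with C-typed values,
+ #
+ # cdef int i
+ # cdef double x
+ # t = (i, x) # inferred as the ctuple (int, double)
+ # u = (i, obj) # any Python-object item => plain 'tuple'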
+
+ def analyse_types(self, env, skip_children=False):
+ if len(self.args) == 0:
+ self.is_temp = False
+ self.is_literal = True
+ return self
+
+ if not skip_children:
+ for i, arg in enumerate(self.args):
+ if arg.is_starred:
+ arg.starred_expr_allowed_here = True
+ self.args[i] = arg.analyse_types(env)
+ if (not self.mult_factor and
+ not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
+ for arg in self.args)):
+ self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
+ self.is_temp = 1
+ return self
+
+ node = SequenceNode.analyse_types(self, env, skip_children=True)
+ node = node._create_merge_node_if_necessary(env)
+ if not node.is_sequence_constructor:
+ return node
+
+ if not all(child.is_literal for child in node.args):
+ return node
+ if not node.mult_factor or (
+ node.mult_factor.is_literal and
+ isinstance(node.mult_factor.constant_result, _py_int_types)):
+ node.is_temp = False
+ node.is_literal = True
+ else:
+ if not node.mult_factor.type.is_pyobject:
+ node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
+ node.is_temp = True
+ node.is_partly_literal = True
+ return node
+
+ def analyse_as_type(self, env):
+ # ctuple type
+ if not self.args:
+ return None
+ item_types = [arg.analyse_as_type(env) for arg in self.args]
+ if any(t is None for t in item_types):
+ return None
+ entry = env.declare_tuple_type(self.pos, item_types)
+ return entry.type
+
+ def coerce_to(self, dst_type, env):
+ if self.type.is_ctuple:
+ if dst_type.is_ctuple and self.type.size == dst_type.size:
+ return self.coerce_to_ctuple(dst_type, env)
+ elif dst_type is tuple_type or dst_type is py_object_type:
+ coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
+ return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
+ else:
+ return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
+ elif dst_type.is_ctuple and not self.mult_factor:
+ return self.coerce_to_ctuple(dst_type, env)
+ else:
+ return SequenceNode.coerce_to(self, dst_type, env)
+
+ def as_list(self):
+ t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
+ if isinstance(self.constant_result, tuple):
+ t.constant_result = list(self.constant_result)
+ return t
+
+ def is_simple(self):
+ # either temp or constant => always simple
+ return True
+
+ def nonlocally_immutable(self):
+ # either temp or constant => always safe
+ return True
+
+ def calculate_result_code(self):
+ if len(self.args) > 0:
+ return self.result_code
+ else:
+ return Naming.empty_tuple
+
+ def calculate_constant_result(self):
+ self.constant_result = tuple([
+ arg.constant_result for arg in self.args])
+
+ def compile_time_value(self, denv):
+ values = self.compile_time_value_list(denv)
+ try:
+ return tuple(values)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def generate_operation_code(self, code):
+ if len(self.args) == 0:
+ # result_code is Naming.empty_tuple
+ return
+
+ if self.is_literal or self.is_partly_literal:
+ # The "mult_factor" is part of the deduplication if it is also constant, i.e. when
+ # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part.
+ dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args)
+ tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key)
+ const_code = code.get_cached_constants_writer(tuple_target)
+ if const_code is not None:
+ # constant is not yet initialised
+ const_code.mark_pos(self.pos)
+ self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
+ const_code.put_giveref(tuple_target)
+ if self.is_literal:
+ self.result_code = tuple_target
+ else:
+ code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ code.put_gotref(self.py_result())
+ else:
+ self.type.entry.used = True
+ self.generate_sequence_packing_code(code)
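+
+ # Net effect of the constant handling above (illustrative): a fully
+ # literal tuple like (1, 2, 3) is built once, cached as a module-level
+ # constant and deduplicated via make_dedup_key(); for the partly literal
+ # "(1, 2) * n" only (1, 2) is cached, and the multiplication happens at
+ # runtime through PyNumber_Multiply().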
+
+
+class ListNode(SequenceNode):
+ # List constructor.
+
+ # obj_conversion_errors [PyrexError] used internally
+ # original_args [ExprNode] used internally
+
+ obj_conversion_errors = []
+ type = list_type
+ in_module_scope = False
+
+ gil_message = "Constructing Python list"
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ # TODO: Infer non-object list arrays.
+ return list_type
+
+ def analyse_expressions(self, env):
+ for arg in self.args:
+ if arg.is_starred:
+ arg.starred_expr_allowed_here = True
+ node = SequenceNode.analyse_expressions(self, env)
+ return node.coerce_to_pyobject(env)
+
+ def analyse_types(self, env):
+ with local_errors(ignore=True) as errors:
+ self.original_args = list(self.args)
+ node = SequenceNode.analyse_types(self, env)
+ node.obj_conversion_errors = errors
+ if env.is_module_scope:
+ self.in_module_scope = True
+ node = node._create_merge_node_if_necessary(env)
+ return node
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject:
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.obj_conversion_errors = []
+ if not self.type.subtype_of(dst_type):
+ error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
+ elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
+ array_length = len(self.args)
+ if self.mult_factor:
+ if isinstance(self.mult_factor.constant_result, _py_int_types):
+ if self.mult_factor.constant_result <= 0:
+ error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
+ else:
+ array_length *= self.mult_factor.constant_result
+ else:
+ error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
+ base_type = dst_type.base_type
+ self.type = PyrexTypes.CArrayType(base_type, array_length)
+ for i in range(len(self.original_args)):
+ arg = self.args[i]
+ if isinstance(arg, CoerceToPyTypeNode):
+ arg = arg.arg
+ self.args[i] = arg.coerce_to(base_type, env)
+ elif dst_type.is_cpp_class:
+ # TODO(robertwb): Avoid object conversion for vector/list/set.
+ return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
+ elif self.mult_factor:
+ error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
+ elif dst_type.is_struct:
+ if len(self.args) > len(dst_type.scope.var_entries):
+ error(self.pos, "Too many members for '%s'" % dst_type)
+ else:
+ if len(self.args) < len(dst_type.scope.var_entries):
+ warning(self.pos, "Too few members for '%s'" % dst_type, 1)
+ for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
+ if isinstance(arg, CoerceToPyTypeNode):
+ arg = arg.arg
+ self.args[i] = arg.coerce_to(member.type, env)
+ self.type = dst_type
+ elif dst_type.is_ctuple:
+ return self.coerce_to_ctuple(dst_type, env)
+ else:
+ self.type = error_type
+ error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
+ return self
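+
+ # Example of the C coercions above (illustrative):
+ #
+ # cdef int arr[3]
+ # arr = [1, 2, 3] # element-wise C array assignment
+ # cdef int arr6[6]
+ # arr6 = [0, 1] * 3 # constant mult_factor scales the length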
+
+ def as_list(self): # dummy for compatibility with TupleNode
+ return self
+
+ def as_tuple(self):
+ t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
+ if isinstance(self.constant_result, list):
+ t.constant_result = tuple(self.constant_result)
+ return t
+
+ def allocate_temp_result(self, code):
+ if self.type.is_array:
+ if self.in_module_scope:
+ self.temp_code = code.funcstate.allocate_temp(
+ self.type, manage_ref=False, static=True, reusable=False)
+ else:
+ # To be valid C++, we must allocate the memory on the stack
+ # manually and be sure not to reuse it for something else.
+ # Yes, this means that we leak a temp array variable.
+ self.temp_code = code.funcstate.allocate_temp(
+ self.type, manage_ref=False, reusable=False)
+ else:
+ SequenceNode.allocate_temp_result(self, code)
+
+ def calculate_constant_result(self):
+ if self.mult_factor:
+ raise ValueError() # the multiplied result may exceed available compile-time memory
+ self.constant_result = [
+ arg.constant_result for arg in self.args]
+
+ def compile_time_value(self, denv):
+ l = self.compile_time_value_list(denv)
+ if self.mult_factor:
+ l *= self.mult_factor.compile_time_value(denv)
+ return l
+
+ def generate_operation_code(self, code):
+ if self.type.is_pyobject:
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.generate_sequence_packing_code(code)
+ elif self.type.is_array:
+ if self.mult_factor:
+ code.putln("{")
+ code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
+ code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
+ i=Naming.quick_temp_cname, count=self.mult_factor.result()))
+ offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
+ else:
+ offset = ''
+ for i, arg in enumerate(self.args):
+ if arg.type.is_array:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
+ self.result(), i, offset,
+ arg.result(), self.result()
+ ))
+ else:
+ code.putln("%s[%s%s] = %s;" % (
+ self.result(),
+ i,
+ offset,
+ arg.result()))
+ if self.mult_factor:
+ code.putln("}")
+ code.putln("}")
+ elif self.type.is_struct:
+ for arg, member in zip(self.args, self.type.scope.var_entries):
+ code.putln("%s.%s = %s;" % (
+ self.result(),
+ member.cname,
+ arg.result()))
+ else:
+ raise InternalError("List type never specified")
+
+
+class ScopedExprNode(ExprNode):
+ # Abstract base class for ExprNodes that have their own local
+ # scope, such as generator expressions.
+ #
+ # expr_scope Scope the inner scope of the expression
+
+ subexprs = []
+ expr_scope = None
+
+ # Does this node really have a local scope, i.e. does it keep loop
+ # variables from leaking? The non-leaking Py3 behaviour is the
+ # default, except for list comprehensions, where the behaviour
+ # differs between Py2 and Py3 (set in Parsing.py based on the
+ # parser context).
+ has_local_scope = True
+
+ def init_scope(self, outer_scope, expr_scope=None):
+ if expr_scope is not None:
+ self.expr_scope = expr_scope
+ elif self.has_local_scope:
+ self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
+ else:
+ self.expr_scope = None
+
+ def analyse_declarations(self, env):
+ self.init_scope(env)
+
+ def analyse_scoped_declarations(self, env):
+ # this is called with the expr_scope as env
+ pass
+
+ def analyse_types(self, env):
+ # no recursion here, the children will be analysed separately below
+ return self
+
+ def analyse_scoped_expressions(self, env):
+ # this is called with the expr_scope as env
+ return self
+
+ def generate_evaluation_code(self, code):
+ # set up local variables and free their references on exit
+ generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
+ if not self.has_local_scope or not self.expr_scope.var_entries:
+ # no local variables => delegate, done
+ generate_inner_evaluation_code(code)
+ return
+
+ code.putln('{ /* enter inner scope */')
+ py_entries = []
+ for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
+ if not entry.in_closure:
+ if entry.type.is_pyobject and entry.used:
+ py_entries.append(entry)
+ if not py_entries:
+ # no local Python references => no cleanup required
+ generate_inner_evaluation_code(code)
+ code.putln('} /* exit inner scope */')
+ return
+
+ # must free all local Python references at each exit point
+ old_loop_labels = code.new_loop_labels()
+ old_error_label = code.new_error_label()
+
+ generate_inner_evaluation_code(code)
+
+ # normal (non-error) exit
+ self._generate_vars_cleanup(code, py_entries)
+
+ # error/loop body exit points
+ exit_scope = code.new_label('exit_scope')
+ code.put_goto(exit_scope)
+ for label, old_label in ([(code.error_label, old_error_label)] +
+ list(zip(code.get_loop_labels(), old_loop_labels))):
+ if code.label_used(label):
+ code.put_label(label)
+ self._generate_vars_cleanup(code, py_entries)
+ code.put_goto(old_label)
+ code.put_label(exit_scope)
+ code.putln('} /* exit inner scope */')
+
+ code.set_loop_labels(old_loop_labels)
+ code.error_label = old_error_label
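+
+ # Shape of the code emitted above when the expression owns Python
+ # locals (illustrative):
+ #
+ # { /* enter inner scope */
+ # ...inner evaluation code...
+ # Py_XDECREF(var); var = 0; /* normal exit cleanup */
+ # goto exit_scope;
+ # inner_error_label:
+ # Py_XDECREF(var); var = 0; /* cleanup, then re-route */
+ # goto outer_error_label; /* to the outer handler */
+ # exit_scope: ;
+ # } /* exit inner scope */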
+
+ def _generate_vars_cleanup(self, code, py_entries):
+ for entry in py_entries:
+ if entry.is_cglobal:
+ code.put_var_gotref(entry)
+ code.put_decref_set(entry.cname, "Py_None")
+ else:
+ code.put_var_xdecref_clear(entry)
+
+
+class ComprehensionNode(ScopedExprNode):
+ # A list/set/dict comprehension
+
+ child_attrs = ["loop"]
+
+ is_temp = True
+ constant_result = not_a_constant
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_declarations(self, env):
+ self.append.target = self # used by the ComprehensionAppendNode in the inner loop
+ self.init_scope(env)
+
+ def analyse_scoped_declarations(self, env):
+ self.loop.analyse_declarations(env)
+
+ def analyse_types(self, env):
+ if not self.has_local_scope:
+ self.loop = self.loop.analyse_expressions(env)
+ return self
+
+ def analyse_scoped_expressions(self, env):
+ if self.has_local_scope:
+ self.loop = self.loop.analyse_expressions(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ self.generate_operation_code(code)
+
+ def generate_operation_code(self, code):
+ if self.type is Builtin.list_type:
+ create_code = 'PyList_New(0)'
+ elif self.type is Builtin.set_type:
+ create_code = 'PySet_New(NULL)'
+ elif self.type is Builtin.dict_type:
+ create_code = 'PyDict_New()'
+ else:
+ raise InternalError("illegal type for comprehension: %s" % self.type)
+ code.putln('%s = %s; %s' % (
+ self.result(), create_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.result())
+ self.loop.generate_execution_code(code)
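+
+ # The loop generated above appends into self.result() through the
+ # ComprehensionAppendNode below, so "[f(x) for x in seq]" compiles
+ # roughly to (illustrative):
+ #
+ # result = PyList_New(0);
+ # for (...) { __Pyx_ListComp_Append(result, f(x)); }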
+
+ def annotate(self, code):
+ self.loop.annotate(code)
+
+
+class ComprehensionAppendNode(Node):
+ # Need to be careful to avoid infinite recursion:
+ # target must not be in child_attrs/subexprs
+
+ child_attrs = ['expr']
+ target = None
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_expressions(self, env):
+ self.expr = self.expr.analyse_expressions(env)
+ if not self.expr.type.is_pyobject:
+ self.expr = self.expr.coerce_to_pyobject(env)
+ return self
+
+ def generate_execution_code(self, code):
+ if self.target.type is list_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
+ function = "__Pyx_ListComp_Append"
+ elif self.target.type is set_type:
+ function = "PySet_Add"
+ else:
+ raise InternalError(
+ "Invalid type for comprehension node: %s" % self.target.type)
+
+ self.expr.generate_evaluation_code(code)
+ code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
+ function,
+ self.target.result(),
+ self.expr.result()
+ ), self.pos))
+ self.expr.generate_disposal_code(code)
+ self.expr.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.expr.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.expr.annotate(code)
+
+
+class DictComprehensionAppendNode(ComprehensionAppendNode):
+ child_attrs = ['key_expr', 'value_expr']
+
+ def analyse_expressions(self, env):
+ self.key_expr = self.key_expr.analyse_expressions(env)
+ if not self.key_expr.type.is_pyobject:
+ self.key_expr = self.key_expr.coerce_to_pyobject(env)
+ self.value_expr = self.value_expr.analyse_expressions(env)
+ if not self.value_expr.type.is_pyobject:
+ self.value_expr = self.value_expr.coerce_to_pyobject(env)
+ return self
+
+ def generate_execution_code(self, code):
+ self.key_expr.generate_evaluation_code(code)
+ self.value_expr.generate_evaluation_code(code)
+ code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
+ self.target.result(),
+ self.key_expr.result(),
+ self.value_expr.result()
+ ), self.pos))
+ self.key_expr.generate_disposal_code(code)
+ self.key_expr.free_temps(code)
+ self.value_expr.generate_disposal_code(code)
+ self.value_expr.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.key_expr.generate_function_definitions(env, code)
+ self.value_expr.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.key_expr.annotate(code)
+ self.value_expr.annotate(code)
+
+
+class InlinedGeneratorExpressionNode(ExprNode):
+ # An inlined generator expression whose result is computed inside the
+ # loop and returned as the generator's first and only return value.
+ # This will only be created by transforms when replacing safe builtin
+ # calls on generator expressions.
+ #
+ # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes
+ # orig_func String the name of the builtin function this node replaces
+ # target ExprNode or None a 'target' for a ComprehensionAppend node
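+ #
+ # For example, a call like ``sum(x*x for x in seq)`` may be replaced by
+ # this node: the loop accumulates the result inside the generator body
+ # and a single __Pyx_Generator_Next() call then fetches that value.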
+
+ subexprs = ["gen"]
+ orig_func = None
+ target = None
+ is_temp = True
+ type = py_object_type
+
+ def __init__(self, pos, gen, comprehension_type=None, **kwargs):
+ gbody = gen.def_node.gbody
+ gbody.is_inlined = True
+ if comprehension_type is not None:
+ assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
+ gbody.inlined_comprehension_type = comprehension_type
+ kwargs.update(
+ target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
+ type=comprehension_type,
+ )
+ super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)
+
+ def may_be_none(self):
+ return self.orig_func not in ('any', 'all', 'sorted')
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_types(self, env):
+ self.gen = self.gen.analyse_expressions(env)
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
+ self.result(), self.gen.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+
+class MergedSequenceNode(ExprNode):
+ """
+ Merge a sequence of iterables into a set/list/tuple.
+
+ The target collection is determined by self.type, which must be set externally.
+
+ args [ExprNode]
+ """
+ subexprs = ['args']
+ is_temp = True
+ gil_message = "Constructing Python collection"
+
+ def __init__(self, pos, args, type):
+ if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
+ # construct a list directly from the first argument that we can then extend
+ if args[0].type is not list_type:
+ args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True, mult_factor=args[0].mult_factor)
+ ExprNode.__init__(self, pos, args=args, type=type)
+
+ def calculate_constant_result(self):
+ result = []
+ for item in self.args:
+ if item.is_sequence_constructor and item.mult_factor:
+ if item.mult_factor.constant_result <= 0:
+ continue
+ # otherwise, adding each item once should be enough
+ if item.is_set_literal or item.is_sequence_constructor:
+ # process items in order
+ items = (arg.constant_result for arg in item.args)
+ else:
+ items = item.constant_result
+ result.extend(items)
+ if self.type is set_type:
+ result = set(result)
+ elif self.type is tuple_type:
+ result = tuple(result)
+ else:
+ assert self.type is list_type
+ self.constant_result = result
+
+ def compile_time_value(self, denv):
+ result = []
+ for item in self.args:
+ if item.is_sequence_constructor and item.mult_factor:
+ if item.mult_factor.compile_time_value(denv) <= 0:
+ continue
+ if item.is_set_literal or item.is_sequence_constructor:
+ # process items in order
+ items = (arg.compile_time_value(denv) for arg in item.args)
+ else:
+ items = item.compile_time_value(denv)
+ result.extend(items)
+ if self.type is set_type:
+ try:
+ result = set(result)
+ except Exception as e:
+ self.compile_time_value_error(e)
+ elif self.type is tuple_type:
+ result = tuple(result)
+ else:
+ assert self.type is list_type
+ return result
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_types(self, env):
+ args = [
+ arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
+ # FIXME: CPython's error message starts with the runtime function name
+ 'argument after * must be an iterable, not NoneType')
+ for arg in self.args
+ ]
+
+ if len(args) == 1 and args[0].type is self.type:
+ # strip this intermediate node and use the bare collection
+ return args[0]
+
+ assert self.type in (set_type, list_type, tuple_type)
+
+ self.args = args
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ is_set = self.type is set_type
+
+ args = iter(self.args)
+ item = next(args)
+ item.generate_evaluation_code(code)
+ if (is_set and item.is_set_literal or
+ not is_set and item.is_sequence_constructor and item.type is list_type):
+ code.putln("%s = %s;" % (self.result(), item.py_result()))
+ item.generate_post_assignment_code(code)
+ else:
+ code.putln("%s = %s(%s); %s" % (
+ self.result(),
+ 'PySet_New' if is_set else 'PySequence_List',
+ item.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ helpers = set()
+ if is_set:
+ add_func = "PySet_Add"
+ extend_func = "__Pyx_PySet_Update"
+ else:
+ add_func = "__Pyx_ListComp_Append"
+ extend_func = "__Pyx_PyList_Extend"
+
+ for item in args:
+ if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
+ (item.is_sequence_constructor and not item.mult_factor)):
+ if not is_set and item.args:
+ helpers.add(("ListCompAppend", "Optimize.c"))
+ for arg in item.args:
+ arg.generate_evaluation_code(code)
+ code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
+ add_func,
+ self.result(),
+ arg.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ continue
+
+ if is_set:
+ helpers.add(("PySet_Update", "Builtins.c"))
+ else:
+ helpers.add(("ListExtend", "Optimize.c"))
+
+ item.generate_evaluation_code(code)
+ code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
+ extend_func,
+ self.result(),
+ item.py_result()))
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ if self.type is tuple_type:
+ code.putln("{")
+ code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
+ Naming.quick_temp_cname,
+ self.result()))
+ code.put_decref(self.result(), py_object_type)
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ Naming.quick_temp_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+ code.putln("}")
+
+ for helper in sorted(helpers):
+ code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
+
+ def annotate(self, code):
+ for item in self.args:
+ item.annotate(code)
+
+
+class SetNode(ExprNode):
+ """
+ Set constructor.
+ """
+ subexprs = ['args']
+ type = set_type
+ is_set_literal = True
+ gil_message = "Constructing Python set"
+
+ def analyse_types(self, env):
+ for i in range(len(self.args)):
+ arg = self.args[i]
+ arg = arg.analyse_types(env)
+ self.args[i] = arg.coerce_to_pyobject(env)
+ self.type = set_type
+ self.is_temp = 1
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def calculate_constant_result(self):
+ self.constant_result = set([arg.constant_result for arg in self.args])
+
+ def compile_time_value(self, denv):
+ values = [arg.compile_time_value(denv) for arg in self.args]
+ try:
+ return set(values)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def generate_evaluation_code(self, code):
+ for arg in self.args:
+ arg.generate_evaluation_code(code)
+ self.allocate_temp_result(code)
+ code.putln(
+ "%s = PySet_New(0); %s" % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ for arg in self.args:
+ code.put_error_if_neg(
+ self.pos,
+ "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+
+
+class DictNode(ExprNode):
+ # Dictionary constructor.
+ #
+ # key_value_pairs [DictItemNode]
+ # exclude_null_values [boolean] Do not add NULL values to dict
+ #
+ # obj_conversion_errors [PyrexError] used internally
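+ #
+ # Roughly, ``{k: v, ...}`` is emitted as
+ #     __pyx_t_1 = __Pyx_PyDict_NewPresized(n);
+ #     PyDict_SetItem(__pyx_t_1, k, v);   /* once per pair, error-checked */
+ # while coercion to a C struct/union type instead assigns each pair to a
+ # member, as in ``__pyx_t_struct.field = value;``.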
+
+ subexprs = ['key_value_pairs']
+ is_temp = 1
+ exclude_null_values = False
+ type = dict_type
+ is_dict_literal = True
+ reject_duplicates = False
+
+ obj_conversion_errors = []
+
+ @classmethod
+ def from_pairs(cls, pos, pairs):
+ return cls(pos, key_value_pairs=[
+ DictItemNode(pos, key=k, value=v) for k, v in pairs])
+
+ def calculate_constant_result(self):
+ self.constant_result = dict([
+ item.constant_result for item in self.key_value_pairs])
+
+ def compile_time_value(self, denv):
+ pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
+ for item in self.key_value_pairs]
+ try:
+ return dict(pairs)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ # TODO: Infer struct constructors.
+ return dict_type
+
+ def analyse_types(self, env):
+ with local_errors(ignore=True) as errors:
+ self.key_value_pairs = [
+ item.analyse_types(env)
+ for item in self.key_value_pairs
+ ]
+ self.obj_conversion_errors = errors
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject:
+ self.release_errors()
+ if self.type.is_struct_or_union:
+ if not dict_type.subtype_of(dst_type):
+ error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
+ return DictNode(self.pos, key_value_pairs=[
+ DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
+ value=item.value.coerce_to_pyobject(env))
+ for item in self.key_value_pairs])
+ if not self.type.subtype_of(dst_type):
+ error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
+ elif dst_type.is_struct_or_union:
+ self.type = dst_type
+ if not dst_type.is_struct and len(self.key_value_pairs) != 1:
+ error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
+ elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
+ warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
+ for item in self.key_value_pairs:
+ if isinstance(item.key, CoerceToPyTypeNode):
+ item.key = item.key.arg
+ if not item.key.is_string_literal:
+ error(item.key.pos, "Invalid struct field identifier")
+ item.key = StringNode(item.key.pos, value="<error>")
+ else:
+ key = str(item.key.value) # converts string literals to unicode in Py3
+ member = dst_type.scope.lookup_here(key)
+ if not member:
+ error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
+ else:
+ value = item.value
+ if isinstance(value, CoerceToPyTypeNode):
+ value = value.arg
+ item.value = value.coerce_to(member.type, env)
+ else:
+ self.type = error_type
+ error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
+ return self
+
+ def release_errors(self):
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.obj_conversion_errors = []
+
+ gil_message = "Constructing Python dict"
+
+ def generate_evaluation_code(self, code):
+ # Custom method used here because key-value
+ # pairs are evaluated and used one at a time.
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ is_dict = self.type.is_pyobject
+ if is_dict:
+ self.release_errors()
+ code.putln(
+ "%s = __Pyx_PyDict_NewPresized(%d); %s" % (
+ self.result(),
+ len(self.key_value_pairs),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ keys_seen = set()
+ key_type = None
+ needs_error_helper = False
+
+ for item in self.key_value_pairs:
+ item.generate_evaluation_code(code)
+ if is_dict:
+ if self.exclude_null_values:
+ code.putln('if (%s) {' % item.value.py_result())
+ key = item.key
+ if self.reject_duplicates:
+ if keys_seen is not None:
+ # avoid runtime 'in' checks for literals that we can do at compile time
+ if not key.is_string_literal:
+ keys_seen = None
+ elif key.value in keys_seen:
+ # FIXME: this could be a compile time error, at least in Cython code
+ keys_seen = None
+ elif key_type is not type(key.value):
+ if key_type is None:
+ key_type = type(key.value)
+ keys_seen.add(key.value)
+ else:
+ # different types => may not be able to compare at compile time
+ keys_seen = None
+ else:
+ keys_seen.add(key.value)
+
+ if keys_seen is None:
+ code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
+ self.result(), key.py_result()))
+ # currently only used in function calls
+ needs_error_helper = True
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
+ key.py_result(),
+ code.error_goto(item.pos)))
+ code.putln("} else {")
+
+ code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
+ self.result(),
+ item.key.py_result(),
+ item.value.py_result()))
+ if self.reject_duplicates and keys_seen is None:
+ code.putln('}')
+ if self.exclude_null_values:
+ code.putln('}')
+ else:
+ code.putln("%s.%s = %s;" % (
+ self.result(),
+ item.key.value,
+ item.value.result()))
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ if needs_error_helper:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
+
+ def annotate(self, code):
+ for item in self.key_value_pairs:
+ item.annotate(code)
+
+
+class DictItemNode(ExprNode):
+ # Represents a single item in a DictNode
+ #
+ # key ExprNode
+ # value ExprNode
+ subexprs = ['key', 'value']
+
+ nogil_check = None # Parent DictNode takes care of it
+
+ def calculate_constant_result(self):
+ self.constant_result = (
+ self.key.constant_result, self.value.constant_result)
+
+ def analyse_types(self, env):
+ self.key = self.key.analyse_types(env)
+ self.value = self.value.analyse_types(env)
+ self.key = self.key.coerce_to_pyobject(env)
+ self.value = self.value.coerce_to_pyobject(env)
+ return self
+
+ def generate_evaluation_code(self, code):
+ self.key.generate_evaluation_code(code)
+ self.value.generate_evaluation_code(code)
+
+ def generate_disposal_code(self, code):
+ self.key.generate_disposal_code(code)
+ self.value.generate_disposal_code(code)
+
+ def free_temps(self, code):
+ self.key.free_temps(code)
+ self.value.free_temps(code)
+
+ def __iter__(self):
+ return iter([self.key, self.value])
+
+
+class SortedDictKeysNode(ExprNode):
+ # build sorted list of dict keys, e.g. for dir()
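+ #
+ # Roughly: a known dict argument becomes
+ #     keys = PyDict_Keys(d);  PyList_Sort(keys);
+ # anything else goes through ``arg.keys()``, is coerced to a list via
+ # PySequence_List() if needed, and is then sorted in place.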
+ subexprs = ['arg']
+
+ is_temp = True
+
+ def __init__(self, arg):
+ ExprNode.__init__(self, arg.pos, arg=arg)
+ self.type = Builtin.list_type
+
+ def analyse_types(self, env):
+ arg = self.arg.analyse_types(env)
+ if arg.type is Builtin.dict_type:
+ arg = arg.as_none_safe_node(
+ "'NoneType' object is not iterable")
+ self.arg = arg
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ dict_result = self.arg.py_result()
+ if self.arg.type is Builtin.dict_type:
+ code.putln('%s = PyDict_Keys(%s); %s' % (
+ self.result(), dict_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ else:
+ # originally used PyMapping_Keys() here, but that may return a tuple
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ 'PyObjectCallMethod0', 'ObjectHandling.c'))
+ keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
+ code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
+ self.result(), dict_result, keys_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
+ code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ code.put_gotref(self.py_result())
+ code.putln("}")
+ code.put_error_if_neg(
+ self.pos, 'PyList_Sort(%s)' % self.py_result())
+
+
+class ModuleNameMixin(object):
+ def get_py_mod_name(self, code):
+ return code.get_py_string_const(
+ self.module_name, identifier=True)
+
+ def get_py_qualified_name(self, code):
+ return code.get_py_string_const(
+ self.qualname, identifier=True)
+
+
+class ClassNode(ExprNode, ModuleNameMixin):
+ # Helper class used in the implementation of Python
+ # class definitions. Constructs a class object given
+ # a name, tuple of bases and class dictionary.
+ #
+ # name EncodedString Name of the class
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # doc ExprNode or None Doc string
+ # module_name EncodedString Name of defining module
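+ #
+ # Roughly emits:
+ #     cls = __Pyx_CreateClass(bases, dict, name, qualname, modname);
+ # with ``__doc__`` inserted into the class dict beforehand if present.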
+
+ subexprs = ['doc']
+ type = py_object_type
+ is_temp = True
+
+ def infer_type(self, env):
+ # TODO: could return 'type' in some cases
+ return py_object_type
+
+ def analyse_types(self, env):
+ if self.doc:
+ self.doc = self.doc.analyse_types(env)
+ self.doc = self.doc.coerce_to_pyobject(env)
+ env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
+ return self
+
+ def may_be_none(self):
+ return True
+
+ gil_message = "Constructing Python class"
+
+ def generate_result_code(self, code):
+ class_def_node = self.class_def_node
+ cname = code.intern_identifier(self.name)
+
+ if self.doc:
+ code.put_error_if_neg(self.pos,
+ 'PyDict_SetItem(%s, %s, %s)' % (
+ class_def_node.dict.py_result(),
+ code.intern_identifier(
+ StringEncoding.EncodedString("__doc__")),
+ self.doc.py_result()))
+ py_mod_name = self.get_py_mod_name(code)
+ qualname = self.get_py_qualified_name(code)
+ code.putln(
+ '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
+ self.result(),
+ class_def_node.bases.py_result(),
+ class_def_node.dict.py_result(),
+ cname,
+ qualname,
+ py_mod_name,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class Py3ClassNode(ExprNode):
+ # Helper class used in the implementation of Python3+
+ # class definitions. Constructs a class object given
+ # a name, tuple of bases and class dictionary.
+ #
+ # name EncodedString Name of the class
+ # module_name EncodedString Name of defining module
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # calculate_metaclass bool should call CalculateMetaclass()
+ # allow_py2_metaclass bool should look for Py2 metaclass
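+ #
+ # Roughly emits:
+ #     cls = __Pyx_Py3ClassCreate(metaclass, name, bases, dict, mkw,
+ #                                calculate_metaclass, allow_py2_metaclass);
+ # where metaclass defaults to (PyObject*)&__Pyx_DefaultClassType.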
+
+ subexprs = []
+ type = py_object_type
+ is_temp = True
+
+ def infer_type(self, env):
+ # TODO: could return 'type' in some cases
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return True
+
+ gil_message = "Constructing Python class"
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
+ cname = code.intern_identifier(self.name)
+ class_def_node = self.class_def_node
+ mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL'
+ if class_def_node.metaclass:
+ metaclass = class_def_node.metaclass.py_result()
+ else:
+ metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
+ code.putln(
+ '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
+ self.result(),
+ metaclass,
+ cname,
+ class_def_node.bases.py_result(),
+ class_def_node.dict.py_result(),
+ mkw,
+ self.calculate_metaclass,
+ self.allow_py2_metaclass,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class PyClassMetaclassNode(ExprNode):
+ # Helper class that holds the Python3 metaclass object
+ #
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+
+ subexprs = []
+
+ def analyse_types(self, env):
+ self.type = py_object_type
+ self.is_temp = True
+ return self
+
+ def may_be_none(self):
+ return True
+
+ def generate_result_code(self, code):
+ bases = self.class_def_node.bases
+ mkw = self.class_def_node.mkw
+ if mkw:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
+ call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
+ bases.result(),
+ mkw.result())
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
+ call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
+ bases.result())
+ code.putln(
+ "%s = %s; %s" % (
+ self.result(), call,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
+ # Helper class that holds the Python3 namespace object
+ #
+ # Not owned by this node unless marked otherwise:
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # doc ExprNode or None Doc string (owned)
+
+ subexprs = ['doc']
+
+ def analyse_types(self, env):
+ if self.doc:
+ self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env)
+ self.type = py_object_type
+ self.is_temp = 1
+ return self
+
+ def may_be_none(self):
+ return True
+
+ def generate_result_code(self, code):
+ cname = code.intern_identifier(self.name)
+ py_mod_name = self.get_py_mod_name(code)
+ qualname = self.get_py_qualified_name(code)
+ class_def_node = self.class_def_node
+ null = "(PyObject *) NULL"
+ doc_code = self.doc.result() if self.doc else null
+ mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null
+ metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null
+ code.putln(
+ "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
+ self.result(),
+ metaclass,
+ class_def_node.bases.result(),
+ cname,
+ qualname,
+ mkw,
+ py_mod_name,
+ doc_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class ClassCellInjectorNode(ExprNode):
+ # Initialize CyFunction.func_classobj
+ is_temp = True
+ type = py_object_type
+ subexprs = []
+ is_active = False
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ assert self.is_active
+ code.putln(
+ '%s = PyList_New(0); %s' % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+ def generate_injection_code(self, code, classobj_cname):
+ assert self.is_active
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
+ code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
+ self.result(), classobj_cname))
+
+
+class ClassCellNode(ExprNode):
+ # Class Cell for noargs super()
+ subexprs = []
+ is_temp = True
+ is_generator = False
+ type = py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ if not self.is_generator:
+ code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
+ self.result(),
+ Naming.self_cname))
+ else:
+ code.putln('%s = %s->classobj;' % (
+ self.result(), Naming.generator_cname))
+ code.putln(
+ 'if (!%s) { PyErr_SetString(PyExc_SystemError, '
+ '"super(): empty __class__ cell"); %s }' % (
+ self.result(),
+ code.error_goto(self.pos)))
+ code.put_incref(self.result(), py_object_type)
+
+
+class PyCFunctionNode(ExprNode, ModuleNameMixin):
+ # Helper class used in the implementation of Python
+ # functions. Constructs a PyCFunction object
+ # from a PyMethodDef struct.
+ #
+ # pymethdef_cname string PyMethodDef structure
+ # self_object ExprNode or None
+ # binding bool
+ # def_node DefNode the Python function node
+ # module_name EncodedString Name of defining module
+ # code_object CodeObjectNode the PyCodeObject creator node
+
+ subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
+ 'annotations_dict']
+
+ self_object = None
+ code_object = None
+ binding = False
+ def_node = None
+ defaults = None
+ defaults_struct = None
+ defaults_pyobjects = 0
+ defaults_tuple = None
+ defaults_kwdict = None
+ annotations_dict = None
+
+ type = py_object_type
+ is_temp = 1
+
+ specialized_cpdefs = None
+ is_specialization = False
+
+ @classmethod
+ def from_defnode(cls, node, binding):
+ return cls(node.pos,
+ def_node=node,
+ pymethdef_cname=node.entry.pymethdef_cname,
+ binding=binding or node.specialized_cpdefs,
+ specialized_cpdefs=node.specialized_cpdefs,
+ code_object=CodeObjectNode(node))
+
+ def analyse_types(self, env):
+ if self.binding:
+ self.analyse_default_args(env)
+ return self
+
+ def analyse_default_args(self, env):
+ """
+ Handle non-literal function's default arguments.
+ """
+ nonliteral_objects = []
+ nonliteral_other = []
+ default_args = []
+ default_kwargs = []
+ annotations = []
+
+ # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants
+ # for default arguments to avoid the dependency on the CyFunction object as 'self' argument
+ # in the underlying C function. Basically, cpdef functions/methods are static C functions,
+ # so their optional arguments must be static, too.
+ # TODO: change CyFunction implementation to pass both function object and owning object for method calls
+ must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope)
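+ # For example, in ``cdef class C: cpdef f(self, x=DEFAULT)`` (DEFAULT being
+ # any non-literal placeholder), the underlying static C function cannot
+ # reach the CyFunction object, so the default must be stored as a module
+ # constant rather than on the function object.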
+
+ for arg in self.def_node.args:
+ if arg.default and not must_use_constants:
+ if not arg.default.is_literal:
+ arg.is_dynamic = True
+ if arg.type.is_pyobject:
+ nonliteral_objects.append(arg)
+ else:
+ nonliteral_other.append(arg)
+ else:
+ arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
+ if arg.kw_only:
+ default_kwargs.append(arg)
+ else:
+ default_args.append(arg)
+ if arg.annotation:
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
+ annotations.append((arg.pos, arg.name, arg.annotation))
+
+ for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
+ if arg and arg.annotation:
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
+ annotations.append((arg.pos, arg.name, arg.annotation))
+
+ annotation = self.def_node.return_type_annotation
+ if annotation:
+ annotation = self.analyse_annotation(env, annotation)
+ self.def_node.return_type_annotation = annotation
+ annotations.append((annotation.pos, StringEncoding.EncodedString("return"), annotation))
+
+ if nonliteral_objects or nonliteral_other:
+ module_scope = env.global_scope()
+ cname = module_scope.next_id(Naming.defaults_struct_prefix)
+ scope = Symtab.StructOrUnionScope(cname)
+ self.defaults = []
+ for arg in nonliteral_objects:
+ entry = scope.declare_var(arg.name, arg.type, None,
+ Naming.arg_prefix + arg.name,
+ allow_pyobject=True)
+ self.defaults.append((arg, entry))
+ for arg in nonliteral_other:
+ entry = scope.declare_var(arg.name, arg.type, None,
+ Naming.arg_prefix + arg.name,
+ allow_pyobject=False, allow_memoryview=True)
+ self.defaults.append((arg, entry))
+ entry = module_scope.declare_struct_or_union(
+ None, 'struct', scope, 1, None, cname=cname)
+ self.defaults_struct = scope
+ self.defaults_pyobjects = len(nonliteral_objects)
+ for arg, entry in self.defaults:
+ arg.default_value = '%s->%s' % (
+ Naming.dynamic_args_cname, entry.cname)
+ self.def_node.defaults_struct = self.defaults_struct.name
+
+ if default_args or default_kwargs:
+ if self.defaults_struct is None:
+ if default_args:
+ defaults_tuple = TupleNode(self.pos, args=[
+ arg.default for arg in default_args])
+ self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
+ if default_kwargs:
+ defaults_kwdict = DictNode(self.pos, key_value_pairs=[
+ DictItemNode(
+ arg.pos,
+ key=IdentifierStringNode(arg.pos, value=arg.name),
+ value=arg.default)
+ for arg in default_kwargs])
+ self.defaults_kwdict = defaults_kwdict.analyse_types(env)
+ else:
+ if default_args:
+ defaults_tuple = DefaultsTupleNode(
+ self.pos, default_args, self.defaults_struct)
+ else:
+ defaults_tuple = NoneNode(self.pos)
+ if default_kwargs:
+ defaults_kwdict = DefaultsKwDictNode(
+ self.pos, default_kwargs, self.defaults_struct)
+ else:
+ defaults_kwdict = NoneNode(self.pos)
+
+ defaults_getter = Nodes.DefNode(
+ self.pos, args=[], star_arg=None, starstar_arg=None,
+ body=Nodes.ReturnStatNode(
+ self.pos, return_type=py_object_type,
+ value=TupleNode(
+ self.pos, args=[defaults_tuple, defaults_kwdict])),
+ decorators=None,
+ name=StringEncoding.EncodedString("__defaults__"))
+ # defaults getter must never live in class scopes, it's always a module function
+ module_scope = env.global_scope()
+ defaults_getter.analyse_declarations(module_scope)
+ defaults_getter = defaults_getter.analyse_expressions(module_scope)
+ defaults_getter.body = defaults_getter.body.analyse_expressions(
+ defaults_getter.local_scope)
+ defaults_getter.py_wrapper_required = False
+ defaults_getter.pymethdef_required = False
+ self.def_node.defaults_getter = defaults_getter
+ if annotations:
+ annotations_dict = DictNode(self.pos, key_value_pairs=[
+ DictItemNode(
+ pos, key=IdentifierStringNode(pos, value=name),
+ value=value)
+ for pos, name, value in annotations])
+ self.annotations_dict = annotations_dict.analyse_types(env)
+
+ def analyse_annotation(self, env, annotation):
+ if annotation is None:
+ return None
+ atype = annotation.analyse_as_type(env)
+ if atype is not None:
+ # Keep parsed types as strings as they might not be Python representable.
+ annotation = UnicodeNode(
+ annotation.pos,
+ value=StringEncoding.EncodedString(atype.declaration_code('', for_display=True)))
+ annotation = annotation.analyse_types(env)
+ if not annotation.type.is_pyobject:
+ annotation = annotation.coerce_to_pyobject(env)
+ return annotation
+
+ def may_be_none(self):
+ return False
+
+ gil_message = "Constructing Python function"
+
+ def self_result_code(self):
+ if self.self_object is None:
+ self_result = "NULL"
+ else:
+ self_result = self.self_object.py_result()
+ return self_result
+
+ def generate_result_code(self, code):
+ if self.binding:
+ self.generate_cyfunction_code(code)
+ else:
+ self.generate_pycfunction_code(code)
+
+ def generate_pycfunction_code(self, code):
+ py_mod_name = self.get_py_mod_name(code)
+ code.putln(
+ '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
+ self.result(),
+ self.pymethdef_cname,
+ self.self_result_code(),
+ py_mod_name,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
+ def generate_cyfunction_code(self, code):
+ if self.specialized_cpdefs:
+ def_node = self.specialized_cpdefs[0]
+ else:
+ def_node = self.def_node
+
+ if self.specialized_cpdefs or self.is_specialization:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
+ constructor = "__pyx_FusedFunction_New"
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
+ constructor = "__Pyx_CyFunction_New"
+
+ if self.code_object:
+ code_object_result = self.code_object.py_result()
+ else:
+ code_object_result = 'NULL'
+
+ flags = []
+ if def_node.is_staticmethod:
+ flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
+ elif def_node.is_classmethod:
+ flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
+
+ if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous:
+ flags.append('__Pyx_CYFUNCTION_CCLASS')
+
+ if flags:
+ flags = ' | '.join(flags)
+ else:
+ flags = '0'
+
+ code.putln(
+ '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
+ self.result(),
+ constructor,
+ self.pymethdef_cname,
+ flags,
+ self.get_py_qualified_name(code),
+ self.self_result_code(),
+ self.get_py_mod_name(code),
+ Naming.moddict_cname,
+ code_object_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
+ if def_node.requires_classobj:
+ assert code.pyclass_stack, "pyclass_stack is empty"
+ class_node = code.pyclass_stack[-1]
+ code.put_incref(self.py_result(), py_object_type)
+ code.putln(
+ 'PyList_Append(%s, %s);' % (
+ class_node.class_cell.result(),
+ self.result()))
+ code.put_giveref(self.py_result())
+
+ if self.defaults:
+ code.putln(
+ 'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
+ self.result(), self.defaults_struct.name,
+ self.defaults_pyobjects, code.error_goto(self.pos)))
+ defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
+ self.defaults_struct.name, self.result())
+ for arg, entry in self.defaults:
+ arg.generate_assignment_code(code, target='%s->%s' % (
+ defaults, entry.cname))
+
+ if self.defaults_tuple:
+ code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
+ self.result(), self.defaults_tuple.py_result()))
+ if self.defaults_kwdict:
+ code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
+ self.result(), self.defaults_kwdict.py_result()))
+ if def_node.defaults_getter and not self.specialized_cpdefs:
+ # Fused functions do not support dynamic defaults, only their specialisations can have them for now.
+ code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
+ self.result(), def_node.defaults_getter.entry.pyfunc_cname))
+ if self.annotations_dict:
+ code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
+ self.result(), self.annotations_dict.py_result()))
+
+
+class InnerFunctionNode(PyCFunctionNode):
+ # Special PyCFunctionNode that depends on a closure class
+ #
+
+ binding = True
+ needs_self_code = True
+
+ def self_result_code(self):
+ if self.needs_self_code:
+ return "((PyObject*)%s)" % Naming.cur_scope_cname
+ return "NULL"
+
+
+class CodeObjectNode(ExprNode):
+ # Create a PyCodeObject for a CyFunction instance.
+ #
+ # def_node DefNode the Python function node
+ # varnames TupleNode a tuple with all local variable names
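+ #
+ # Roughly: the code object is created once at module init via
+ #     codeobj = (PyObject*) __Pyx_PyCode_New(argcount, kwonlyargcount,
+ #                   nlocals, 0, flags, empty_bytes, ..., varnames, ...,
+ #                   filename, name, firstlineno, empty_bytes);
+ # and serves introspection/tracebacks only; its bytecode is empty.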
+
+ subexprs = ['varnames']
+ is_temp = False
+ result_code = None
+
+ def __init__(self, def_node):
+ ExprNode.__init__(self, def_node.pos, def_node=def_node)
+ args = list(def_node.args)
+ # if we have args/kwargs, then the first two in var_entries are those
+ local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
+ self.varnames = TupleNode(
+ def_node.pos,
+ args=[IdentifierStringNode(arg.pos, value=arg.name)
+ for arg in args + local_vars],
+ is_temp=0,
+ is_literal=1)
+
+ def may_be_none(self):
+ return False
+
+ def calculate_result_code(self, code=None):
+ if self.result_code is None:
+ self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
+ return self.result_code
+
+ def generate_result_code(self, code):
+ if self.result_code is None:
+ self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
+
+ code = code.get_cached_constants_writer(self.result_code)
+ if code is None:
+ return # already initialised
+ code.mark_pos(self.pos)
+ func = self.def_node
+ func_name = code.get_py_string_const(
+ func.name, identifier=True, is_str=False, unicode_value=func.name)
+ # FIXME: better way to get the module file path at module init time? Encoding to use?
+ # XXX Use get_description() to set the arcadia-root-relative filename
+ file_path = StringEncoding.bytes_literal(func.pos[0].get_description().encode('utf8'), 'utf8')
+ file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
+
+ # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
+ flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
+
+ if self.def_node.star_arg:
+ flags.append('CO_VARARGS')
+ if self.def_node.starstar_arg:
+ flags.append('CO_VARKEYWORDS')
+
+ code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
+ self.result_code,
+ len(func.args) - func.num_kwonly_args, # argcount
+ func.num_kwonly_args, # kwonlyargcount (Py3 only)
+ len(self.varnames.args), # nlocals
+ '|'.join(flags) or '0', # flags
+ Naming.empty_bytes, # code
+ Naming.empty_tuple, # consts
+ Naming.empty_tuple, # names (FIXME)
+ self.varnames.result(), # varnames
+ Naming.empty_tuple, # freevars (FIXME)
+ Naming.empty_tuple, # cellvars (FIXME)
+ file_path_const, # filename
+ func_name, # name
+ self.pos[1], # firstlineno
+ Naming.empty_bytes, # lnotab
+ code.error_goto_if_null(self.result_code, self.pos),
+ ))
+
+
+class DefaultLiteralArgNode(ExprNode):
+ # CyFunction's literal argument default value
+ #
+ # Evaluate literal only once.
+
+ subexprs = []
+ is_literal = True
+ is_temp = False
+
+ def __init__(self, pos, arg):
+ super(DefaultLiteralArgNode, self).__init__(pos)
+ self.arg = arg
+ self.type = self.arg.type
+ self.evaluated = False
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_evaluation_code(self, code):
+ if not self.evaluated:
+ self.arg.generate_evaluation_code(code)
+ self.evaluated = True
+
+ def result(self):
+ return self.type.cast_code(self.arg.result())
+
+
+class DefaultNonLiteralArgNode(ExprNode):
+ # CyFunction's non-literal argument default value
+
+ subexprs = []
+
+ def __init__(self, pos, arg, defaults_struct):
+ super(DefaultNonLiteralArgNode, self).__init__(pos)
+ self.arg = arg
+ self.defaults_struct = defaults_struct
+
+ def analyse_types(self, env):
+ self.type = self.arg.type
+ self.is_temp = False
+ return self
+
+ def generate_result_code(self, code):
+ pass
+
+ def result(self):
+ return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
+ self.defaults_struct.name, Naming.self_cname,
+ self.defaults_struct.lookup(self.arg.name).cname)
+
+
+class DefaultsTupleNode(TupleNode):
+ # CyFunction's __defaults__ tuple
+
+ def __init__(self, pos, defaults, defaults_struct):
+ args = []
+ for arg in defaults:
+ if not arg.default.is_literal:
+ arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+ else:
+ arg = arg.default
+ args.append(arg)
+ super(DefaultsTupleNode, self).__init__(pos, args=args)
+
+ def analyse_types(self, env, skip_children=False):
+ return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
+
+
+class DefaultsKwDictNode(DictNode):
+ # CyFunction's __kwdefaults__ dict
+
+ def __init__(self, pos, defaults, defaults_struct):
+ items = []
+ for arg in defaults:
+ name = IdentifierStringNode(arg.pos, value=arg.name)
+ if not arg.default.is_literal:
+ arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+ else:
+ arg = arg.default
+ items.append(DictItemNode(arg.pos, key=name, value=arg))
+ super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
+
+
+class LambdaNode(InnerFunctionNode):
+ # Lambda expression node (only used as a function reference)
+ #
+ # args [CArgDeclNode] formal arguments
+ # star_arg PyArgDeclNode or None * argument
+ # starstar_arg PyArgDeclNode or None ** argument
+ # lambda_name string a module-globally unique lambda name
+ # result_expr ExprNode
+ # def_node DefNode the underlying function 'def' node
+
+ child_attrs = ['def_node']
+
+ name = StringEncoding.EncodedString('<lambda>')
+
+ def analyse_declarations(self, env):
+ self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
+ self.def_node.no_assignment_synthesis = True
+ self.def_node.pymethdef_required = True
+ self.def_node.analyse_declarations(env)
+ self.def_node.is_cyfunction = True
+ self.pymethdef_cname = self.def_node.entry.pymethdef_cname
+ env.add_lambda_def(self.def_node)
+
+ def analyse_types(self, env):
+ self.def_node = self.def_node.analyse_expressions(env)
+ return super(LambdaNode, self).analyse_types(env)
+
+ def generate_result_code(self, code):
+ self.def_node.generate_execution_code(code)
+ super(LambdaNode, self).generate_result_code(code)
+
+
+class GeneratorExpressionNode(LambdaNode):
+ # A generator expression, e.g. (i for i in range(10))
+ #
+ # Result is a generator.
+ #
+ # loop ForStatNode the for-loop, containing a YieldExprNode
+ # def_node DefNode the underlying generator 'def' node
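+ #
+ # For example, ``(i for i in range(10))`` compiles to a hidden no-argument
+ # def node; generate_result_code() calls its C entry point with the
+ # enclosing scope to obtain the generator object.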
+
+ name = StringEncoding.EncodedString('genexpr')
+ binding = False
+
+ def analyse_declarations(self, env):
+ self.genexpr_name = env.next_id('genexpr')
+ super(GeneratorExpressionNode, self).analyse_declarations(env)
+ # No pymethdef required
+ self.def_node.pymethdef_required = False
+ self.def_node.py_wrapper_required = False
+ self.def_node.is_cyfunction = False
+ # Force genexpr signature
+ self.def_node.entry.signature = TypeSlots.pyfunction_noargs
+
+ def generate_result_code(self, code):
+ code.putln(
+ '%s = %s(%s); %s' % (
+ self.result(),
+ self.def_node.entry.pyfunc_cname,
+ self.self_result_code(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+
+class YieldExprNode(ExprNode):
+ # Yield expression node
+ #
+ # arg ExprNode the value to return from the generator
+ # label_num integer yield label number
+ # is_yield_from boolean is a YieldFromExprNode to delegate to another generator
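+ #
+ # Rough sketch of the suspend/resume protocol implemented below: live
+ # temps are saved into the closure, ``resume_label`` is set and retval is
+ # returned to the caller; the matching label restores the temps and picks
+ # up the sent value when the generator is re-entered.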
+
+ subexprs = ['arg']
+ type = py_object_type
+ label_num = 0
+ is_yield_from = False
+ is_await = False
+ in_async_gen = False
+ expr_keyword = 'yield'
+
+ def analyse_types(self, env):
+ if not self.label_num or (self.is_yield_from and self.in_async_gen):
+ error(self.pos, "'%s' not supported here" % self.expr_keyword)
+ self.is_temp = 1
+ if self.arg is not None:
+ self.arg = self.arg.analyse_types(env)
+ if not self.arg.type.is_pyobject:
+ self.coerce_yield_argument(env)
+ return self
+
+ def coerce_yield_argument(self, env):
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def generate_evaluation_code(self, code):
+ if self.arg:
+ self.arg.generate_evaluation_code(code)
+ self.arg.make_owned_reference(code)
+ code.putln(
+ "%s = %s;" % (
+ Naming.retval_cname,
+ self.arg.result_as(py_object_type)))
+ self.arg.generate_post_assignment_code(code)
+ self.arg.free_temps(code)
+ else:
+ code.put_init_to_py_none(Naming.retval_cname, py_object_type)
+ self.generate_yield_code(code)
+
+ def generate_yield_code(self, code):
+ """
+ Generate the code to return the argument in 'Naming.retval_cname'
+ and to continue at the yield label.
+ """
+ label_num, label_name = code.new_yield_label(
+ self.expr_keyword.replace(' ', '_'))
+ code.use_label(label_name)
+
+ saved = []
+ code.funcstate.closure_temps.reset()
+ for cname, type, manage_ref in code.funcstate.temps_in_use():
+ save_cname = code.funcstate.closure_temps.allocate_temp(type)
+ saved.append((cname, save_cname, type))
+ if type.is_pyobject:
+ code.put_xgiveref(cname)
+ code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
+
+ code.put_xgiveref(Naming.retval_cname)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
+ code.put_finish_refcount_context()
+
+ if code.funcstate.current_except is not None:
+ # inside of an except block => save away currently handled exception
+ code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
+ else:
+ # no exceptions being handled => restore exception state of caller
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+
+ code.putln("/* return from %sgenerator, %sing value */" % (
+ 'async ' if self.in_async_gen else '',
+ 'await' if self.is_await else 'yield'))
+ code.putln("%s->resume_label = %d;" % (
+ Naming.generator_cname, label_num))
+ if self.in_async_gen and not self.is_await:
+ # __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
+ code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
+ else:
+ code.putln("return %s;" % Naming.retval_cname)
+
+ code.put_label(label_name)
+ for cname, save_cname, type in saved:
+ code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
+ if type.is_pyobject:
+ code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
+ code.put_xgotref(cname)
+ self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
+ if self.result_is_used:
+ self.allocate_temp_result(code)
+ code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
+ code.put_incref(self.result(), py_object_type)
+
+ def generate_sent_value_handling_code(self, code, value_cname):
+ code.putln(code.error_goto_if_null(value_cname, self.pos))
+
+
+class _YieldDelegationExprNode(YieldExprNode):
+ def yield_from_func(self, code):
+ raise NotImplementedError()
+
+ def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
+ if source_cname is None:
+ self.arg.generate_evaluation_code(code)
+ code.putln("%s = %s(%s, %s);" % (
+ Naming.retval_cname,
+ self.yield_from_func(code),
+ Naming.generator_cname,
+ self.arg.py_result() if source_cname is None else source_cname))
+ if source_cname is None:
+ self.arg.generate_disposal_code(code)
+ self.arg.free_temps(code)
+ elif decref_source:
+ code.put_decref_clear(source_cname, py_object_type)
+ code.put_xgotref(Naming.retval_cname)
+
+ code.putln("if (likely(%s)) {" % Naming.retval_cname)
+ self.generate_yield_code(code)
+ code.putln("} else {")
+ # either error or sub-generator has normally terminated: return value => node result
+ if self.result_is_used:
+ self.fetch_iteration_result(code)
+ else:
+ self.handle_iteration_exception(code)
+ code.putln("}")
+
+ def fetch_iteration_result(self, code):
+ # YieldExprNode has allocated the result temp for us
+ code.putln("%s = NULL;" % self.result())
+ code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
+ code.put_gotref(self.result())
+
+ def handle_iteration_exception(self, code):
+ code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
+ code.putln("if (exc_type) {")
+ code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
+ code.putln("else %s" % code.error_goto(self.pos))
+ code.putln("}")
+
+
+class YieldFromExprNode(_YieldDelegationExprNode):
+ # "yield from GEN" expression
+ is_yield_from = True
+ expr_keyword = 'yield from'
+
+ def coerce_yield_argument(self, env):
+ if not self.arg.type.is_string:
+ # FIXME: support C arrays and C++ iterators?
+ error(self.pos, "yielding from non-Python object not supported")
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def yield_from_func(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
+ return "__Pyx_Generator_Yield_From"
+
+
+class AwaitExprNode(_YieldDelegationExprNode):
+ # 'await' expression node
+ #
+ # arg ExprNode the Awaitable value to await
+ # label_num integer yield label number
+
+ is_await = True
+ expr_keyword = 'await'
+
+ def coerce_yield_argument(self, env):
+ if self.arg is not None:
+ # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def yield_from_func(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
+ return "__Pyx_Coroutine_Yield_From"
+
+
+class AwaitIterNextExprNode(AwaitExprNode):
+ # 'await' expression node as part of 'async for' iteration
+ #
+ # Breaks out of loop on StopAsyncIteration exception.
+
+ def _generate_break(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
+ code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
+ " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
+ code.putln("PyErr_Clear();")
+ code.putln("break;")
+ code.putln("}")
+
+ def fetch_iteration_result(self, code):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ self._generate_break(code)
+ super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
+
+ def generate_sent_value_handling_code(self, code, value_cname):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ code.putln("if (unlikely(!%s)) {" % value_cname)
+ self._generate_break(code)
+ # all non-break exceptions are errors, as in parent class
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+
+class GlobalsExprNode(AtomicExprNode):
+ type = dict_type
+ is_temp = 1
+
+ def analyse_types(self, env):
+ env.use_utility_code(Builtin.globals_utility_code)
+ return self
+
+ gil_message = "Constructing globals dict"
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ code.putln('%s = __Pyx_Globals(); %s' % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+
+class LocalsDictItemNode(DictItemNode):
+ def analyse_types(self, env):
+ self.key = self.key.analyse_types(env)
+ self.value = self.value.analyse_types(env)
+ self.key = self.key.coerce_to_pyobject(env)
+ if self.value.type.can_coerce_to_pyobject(env):
+ self.value = self.value.coerce_to_pyobject(env)
+ else:
+ self.value = None
+ return self
+
+
+class FuncLocalsExprNode(DictNode):
+ def __init__(self, pos, env):
+ local_vars = sorted([
+ entry.name for entry in env.entries.values() if entry.name])
+ items = [LocalsDictItemNode(
+ pos, key=IdentifierStringNode(pos, value=var),
+ value=NameNode(pos, name=var, allow_null=True))
+ for var in local_vars]
+ DictNode.__init__(self, pos, key_value_pairs=items,
+ exclude_null_values=True)
+
+ def analyse_types(self, env):
+ node = super(FuncLocalsExprNode, self).analyse_types(env)
+ node.key_value_pairs = [ i for i in node.key_value_pairs
+ if i.value is not None ]
+ return node
+
+
+class PyClassLocalsExprNode(AtomicExprNode):
+ def __init__(self, pos, pyclass_dict):
+ AtomicExprNode.__init__(self, pos)
+ self.pyclass_dict = pyclass_dict
+
+ def analyse_types(self, env):
+ self.type = self.pyclass_dict.type
+ self.is_temp = False
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def result(self):
+ return self.pyclass_dict.result()
+
+ def generate_result_code(self, code):
+ pass
+
+
+def LocalsExprNode(pos, scope_node, env):
+ if env.is_module_scope:
+ return GlobalsExprNode(pos)
+ if env.is_py_class_scope:
+ return PyClassLocalsExprNode(pos, scope_node.dict)
+ return FuncLocalsExprNode(pos, env)
+
+
+#-------------------------------------------------------------------
+#
+# Unary operator nodes
+#
+#-------------------------------------------------------------------
+
+compile_time_unary_operators = {
+ 'not': operator.not_,
+ '~': operator.inv,
+ '-': operator.neg,
+ '+': operator.pos,
+}
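+
+# (These implement constant folding and compile-time DEF expressions;
+# e.g. the value of ``DEF NEG = -3`` is computed with operator.neg.)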
+
+class UnopNode(ExprNode):
+ # operator string
+ # operand ExprNode
+ #
+ # Processing during analyse_expressions phase:
+ #
+ # analyse_c_operation
+ # Called when the operand is not a pyobject.
+ # - Check operand type and coerce if needed.
+ # - Determine result type and result code fragment.
+ # - Allocate temporary for result if needed.
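+ #
+ # For example, ``-x`` on a C operand is emitted inline as ``(-x)``, a
+ # Python object operand becomes a call such as PyNumber_Negative(x) with
+ # the usual NULL check, and C++ operands dispatch to operator overloads.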
+
+ subexprs = ['operand']
+ infix = True
+
+ def calculate_constant_result(self):
+ func = compile_time_unary_operators[self.operator]
+ self.constant_result = func(self.operand.constant_result)
+
+ def compile_time_value(self, denv):
+ func = compile_time_unary_operators.get(self.operator)
+ if not func:
+ error(self.pos,
+ "Unary '%s' not supported in compile-time expression"
+ % self.operator)
+ operand = self.operand.compile_time_value(denv)
+ try:
+ return func(operand)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_type(self, env):
+ operand_type = self.operand.infer_type(env)
+ if operand_type.is_cpp_class or operand_type.is_ptr:
+ cpp_type = operand_type.find_cpp_operation_type(self.operator)
+ if cpp_type is not None:
+ return cpp_type
+ return self.infer_unop_type(env, operand_type)
+
+ def infer_unop_type(self, env, operand_type):
+ if operand_type.is_pyobject:
+ return py_object_type
+ else:
+ return operand_type
+
+ def may_be_none(self):
+ if self.operand.type and self.operand.type.is_builtin_type:
+ if self.operand.type is not type_type:
+ return False
+ return ExprNode.may_be_none(self)
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ if self.is_pythran_operation(env):
+ self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
+ self.is_temp = 1
+ elif self.is_py_operation():
+ self.coerce_operand_to_pyobject(env)
+ self.type = py_object_type
+ self.is_temp = 1
+ elif self.is_cpp_operation():
+ self.analyse_cpp_operation(env)
+ else:
+ self.analyse_c_operation(env)
+ return self
+
+ def check_const(self):
+ return self.operand.check_const()
+
+ def is_py_operation(self):
+ return self.operand.type.is_pyobject or self.operand.type.is_ctuple
+
+ def is_pythran_operation(self, env):
+ np_pythran = has_np_pythran(env)
+ op_type = self.operand.type
+ return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
+
+ def nogil_check(self, env):
+ if self.is_py_operation():
+ self.gil_error()
+
+ def is_cpp_operation(self):
+ type = self.operand.type
+ return type.is_cpp_class
+
+ def coerce_operand_to_pyobject(self, env):
+ self.operand = self.operand.coerce_to_pyobject(env)
+
+ def generate_result_code(self, code):
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran unaryop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ self.result(),
+ self.result(),
+ self.operator,
+ self.operand.pythran_result()))
+ elif self.operand.type.is_pyobject:
+ self.generate_py_operation_code(code)
+ elif self.is_temp:
+ if self.is_cpp_operation() and self.exception_check == '+':
+ translate_cpp_exception(code, self.pos,
+ "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))
+
+ def generate_py_operation_code(self, code):
+ function = self.py_operation_function(code)
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ function,
+ self.operand.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+
+ def type_error(self):
+ if not self.operand.type.is_error:
+ error(self.pos, "Invalid operand type for '%s' (%s)" %
+ (self.operator, self.operand.type))
+ self.type = PyrexTypes.error_type
+
+ def analyse_cpp_operation(self, env, overload_check=True):
+ entry = env.lookup_operator(self.operator, [self.operand])
+ if overload_check and not entry:
+ self.type_error()
+ return
+ if entry:
+ self.exception_check = entry.type.exception_check
+ self.exception_value = entry.type.exception_value
+ if self.exception_check == '+':
+ self.is_temp = True
+ if self.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ else:
+ self.exception_check = ''
+ self.exception_value = ''
+ cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
+ if overload_check and cpp_type is None:
+ error(self.pos, "'%s' operator not defined for %s" % (
+ self.operator, self.operand.type))
+ self.type_error()
+ return
+ self.type = cpp_type
+
+
+class NotNode(UnopNode):
+ # 'not' operator
+ #
+ # operand ExprNode
+ operator = '!'
+
+ type = PyrexTypes.c_bint_type
+
+ def calculate_constant_result(self):
+ self.constant_result = not self.operand.constant_result
+
+ def compile_time_value(self, denv):
+ operand = self.operand.compile_time_value(denv)
+ try:
+ return not operand
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_unop_type(self, env, operand_type):
+ return PyrexTypes.c_bint_type
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ operand_type = self.operand.type
+ if operand_type.is_cpp_class:
+ self.analyse_cpp_operation(env)
+ else:
+ self.operand = self.operand.coerce_to_boolean(env)
+ return self
+
+ def calculate_result_code(self):
+ return "(!%s)" % self.operand.result()
+
+
+class UnaryPlusNode(UnopNode):
+ # unary '+' operator
+
+ operator = '+'
+
+ def analyse_c_operation(self, env):
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+
+ def py_operation_function(self, code):
+ return "PyNumber_Positive"
+
+ def calculate_result_code(self):
+ if self.is_cpp_operation():
+ return "(+%s)" % self.operand.result()
+ else:
+ return self.operand.result()
+
+
+class UnaryMinusNode(UnopNode):
+ # unary '-' operator
+
+ operator = '-'
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_numeric:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_enum:
+ self.type = PyrexTypes.c_int_type
+ else:
+ self.type_error()
+ if self.type.is_complex:
+ self.infix = False
+
+ def py_operation_function(self, code):
+ return "PyNumber_Negative"
+
+ def calculate_result_code(self):
+ if self.infix:
+ return "(-%s)" % self.operand.result()
+ else:
+ return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
+
+ def get_constant_c_result_code(self):
+ value = self.operand.get_constant_c_result_code()
+ if value:
+ return "(-%s)" % value
+
+
+class TildeNode(UnopNode):
+ # unary '~' operator
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_int:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_enum:
+ self.type = PyrexTypes.c_int_type
+ else:
+ self.type_error()
+
+ def py_operation_function(self, code):
+ return "PyNumber_Invert"
+
+ def calculate_result_code(self):
+ return "(~%s)" % self.operand.result()
+
+
+class CUnopNode(UnopNode):
+
+ def is_py_operation(self):
+ return False
+
+
+class DereferenceNode(CUnopNode):
+ # unary * operator
+
+ operator = '*'
+
+ def infer_unop_type(self, env, operand_type):
+ if operand_type.is_ptr:
+ return operand_type.base_type
+ else:
+ return PyrexTypes.error_type
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_ptr:
+ self.type = self.operand.type.base_type
+ else:
+ self.type_error()
+
+ def calculate_result_code(self):
+ return "(*%s)" % self.operand.result()
+
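+# Example (illustrative): with "cdef int* p", the expression
+# cython.operator.dereference(p) maps to a DereferenceNode and is emitted as
+# "(*__pyx_v_p)" with result type int.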
+
+class DecrementIncrementNode(CUnopNode):
+ # unary ++/-- operator
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_numeric:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_ptr:
+ self.type = self.operand.type
+ else:
+ self.type_error()
+
+ def calculate_result_code(self):
+ if self.is_prefix:
+ return "(%s%s)" % (self.operator, self.operand.result())
+ else:
+ return "(%s%s)" % (self.operand.result(), self.operator)
+
+
+def inc_dec_constructor(is_prefix, operator):
+ return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
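+
+# Sketch of intended use (an assumption; the callers live outside this
+# excerpt): inc_dec_constructor(True, '++') returns a factory that can be
+# used like a node class, so factory(pos, operand=node) builds the prefix
+# "++" node backing cython.operator.preincrement(x).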
+
+
+class AmpersandNode(CUnopNode):
+ # The C address-of operator.
+ #
+ # operand ExprNode
+ operator = '&'
+
+ def infer_unop_type(self, env, operand_type):
+ return PyrexTypes.c_ptr_type(operand_type)
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ argtype = self.operand.type
+ if argtype.is_cpp_class:
+ self.analyse_cpp_operation(env, overload_check=False)
+ if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
+ if argtype.is_memoryviewslice:
+ self.error("Cannot take address of memoryview slice")
+ else:
+ self.error("Taking address of non-lvalue (type %s)" % argtype)
+ return self
+ if argtype.is_pyobject:
+ self.error("Cannot take address of Python %s" % (
+ "variable '%s'" % self.operand.name if self.operand.is_name else
+ "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
+ "object"))
+ return self
+ if not argtype.is_cpp_class or not self.type:
+ self.type = PyrexTypes.c_ptr_type(argtype)
+ return self
+
+ def check_const(self):
+ return self.operand.check_const_addr()
+
+ def error(self, mess):
+ error(self.pos, mess)
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+
+ def calculate_result_code(self):
+ return "(&%s)" % self.operand.result()
+
+ def generate_result_code(self, code):
+ if (self.operand.type.is_cpp_class and self.exception_check == '+'):
+ translate_cpp_exception(code, self.pos,
+ "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+
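+# Example (illustrative): for "cdef int x", the expression "&x" is emitted
+# as "(&__pyx_v_x)"; taking the address of a Python object or a memoryview
+# slice is rejected in AmpersandNode.analyse_types() above.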
+
+unop_node_classes = {
+ "+": UnaryPlusNode,
+ "-": UnaryMinusNode,
+ "~": TildeNode,
+}
+
+def unop_node(pos, operator, operand):
+    # Construct a unop node of the appropriate class for
+    # the given operator.
+ if isinstance(operand, IntNode) and operator == '-':
+ return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
+ longness=operand.longness, unsigned=operand.unsigned)
+    elif isinstance(operand, UnopNode) and operand.operator == operator and operator in '+-':
+ warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
+ return unop_node_classes[operator](pos,
+ operator = operator,
+ operand = operand)
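+
+# Worked example (illustrative): for the source "--x" the parser builds
+# unop_node(pos, '-', unop_node(pos, '-', x)); the outer call sees a UnopNode
+# operand with the same '-' operator and issues the warning above, since --x
+# merely evaluates to -(-x) == x.  A negated literal such as "-3" folds
+# directly into a single IntNode with value "-3".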
+
+
+class TypecastNode(ExprNode):
+ # C type cast
+ #
+ # operand ExprNode
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+ # typecheck boolean
+ #
+    # When used from a transform, the attribute "type" may be set directly,
+    # leaving base_type and declarator as None.
+
+ subexprs = ['operand']
+ base_type = declarator = type = None
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ if self.type is None:
+ base_type = self.base_type.analyse(env)
+ _, self.type = self.declarator.analyse(base_type, env)
+ return self.type
+
+ def analyse_types(self, env):
+ if self.type is None:
+ base_type = self.base_type.analyse(env)
+ _, self.type = self.declarator.analyse(base_type, env)
+ if self.operand.has_constant_result():
+ # Must be done after self.type is resolved.
+ self.calculate_constant_result()
+ if self.type.is_cfunction:
+ error(self.pos,
+ "Cannot cast to a function type")
+ self.type = PyrexTypes.error_type
+ self.operand = self.operand.analyse_types(env)
+ if self.type is PyrexTypes.c_bint_type:
+ # short circuit this to a coercion
+ return self.operand.coerce_to_boolean(env)
+ to_py = self.type.is_pyobject
+ from_py = self.operand.type.is_pyobject
+ if from_py and not to_py and self.operand.is_ephemeral():
+ if not self.type.is_numeric and not self.type.is_cpp_class:
+ error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
+ if to_py and not from_py:
+ if self.type is bytes_type and self.operand.type.is_int:
+ return CoerceIntToBytesNode(self.operand, env)
+ elif self.operand.type.can_coerce_to_pyobject(env):
+ self.result_ctype = py_object_type
+ self.operand = self.operand.coerce_to(self.type, env)
+ else:
+ if self.operand.type.is_ptr:
+ if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
+ error(self.pos, "Python objects cannot be cast from pointers of primitive types")
+ else:
+ # Should this be an error?
+ warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
+ self.operand.type, self.type))
+ self.operand = self.operand.coerce_to_simple(env)
+ elif from_py and not to_py:
+ if self.type.create_from_py_utility_code(env):
+ self.operand = self.operand.coerce_to(self.type, env)
+ elif self.type.is_ptr:
+ if not (self.type.base_type.is_void or self.type.base_type.is_struct):
+ error(self.pos, "Python objects cannot be cast to pointers of primitive types")
+ else:
+                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
+                        self.operand.type, self.type))
+ elif from_py and to_py:
+ if self.typecheck:
+ self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
+ elif isinstance(self.operand, SliceIndexNode):
+ # This cast can influence the created type of string slices.
+ self.operand = self.operand.coerce_to(self.type, env)
+ elif self.type.is_complex and self.operand.type.is_complex:
+ self.operand = self.operand.coerce_to_simple(env)
+ elif self.operand.type.is_fused:
+ self.operand = self.operand.coerce_to(self.type, env)
+ #self.type = self.operand.type
+ if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil:
+ op_type = self.operand.type
+ if op_type.is_ptr:
+ op_type = op_type.base_type
+ if op_type.is_cfunction and not op_type.nogil:
+ warning(self.pos,
+ "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1)
+ return self
+
+ def is_simple(self):
+ # either temp or a C cast => no side effects other than the operand's
+ return self.operand.is_simple()
+
+ def is_ephemeral(self):
+        # a C cast borrows its operand's lifetime => ephemeral iff the operand is
+ return self.operand.is_ephemeral()
+
+ def nonlocally_immutable(self):
+ return self.is_temp or self.operand.nonlocally_immutable()
+
+ def nogil_check(self, env):
+ if self.type and self.type.is_pyobject and self.is_temp:
+ self.gil_error()
+
+ def check_const(self):
+ return self.operand.check_const()
+
+ def calculate_constant_result(self):
+ self.constant_result = self.calculate_result_code(self.operand.constant_result)
+
+ def calculate_result_code(self, operand_result = None):
+ if operand_result is None:
+ operand_result = self.operand.result()
+ if self.type.is_complex:
+ operand_result = self.operand.result()
+ if self.operand.type.is_complex:
+ real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
+ imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
+ else:
+ real_part = self.type.real_type.cast_code(operand_result)
+ imag_part = "0"
+ return "%s(%s, %s)" % (
+ self.type.from_parts,
+ real_part,
+ imag_part)
+ else:
+ return self.type.cast_code(operand_result)
+
+ def get_constant_c_result_code(self):
+ operand_result = self.operand.get_constant_c_result_code()
+ if operand_result:
+ return self.type.cast_code(operand_result)
+
+ def result_as(self, type):
+ if self.type.is_pyobject and not self.is_temp:
+ # Optimise away some unnecessary casting
+ return self.operand.result_as(type)
+ else:
+ return ExprNode.result_as(self, type)
+
+ def generate_result_code(self, code):
+ if self.is_temp:
+ code.putln(
+ "%s = (PyObject *)%s;" % (
+ self.result(),
+ self.operand.result()))
+ code.put_incref(self.result(), self.ctype())
+
+
+ERR_START = "Start may not be given"
+ERR_NOT_STOP = "Stop must be provided to indicate shape"
+ERR_STEPS = ("Strides may only be given to indicate contiguity. "
+ "Consider slicing it after conversion")
+ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
+ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
+
+
+class CythonArrayNode(ExprNode):
+ """
+ Used when a pointer of base_type is cast to a memoryviewslice with that
+ base type. i.e.
+
+ <int[:M:1, :N]> p
+
+ creates a fortran-contiguous cython.array.
+
+ We leave the type set to object so coercions to object are more efficient
+ and less work. Acquiring a memoryviewslice from this will be just as
+ efficient. ExprNode.coerce_to() will do the additional typecheck on
+ self.compile_time_type
+
+ This also handles <int[:, :]> my_c_array
+
+
+ operand ExprNode the thing we're casting
+ base_type_node MemoryViewSliceTypeNode the cast expression node
+ """
+
+ subexprs = ['operand', 'shapes']
+
+ shapes = None
+ is_temp = True
+ mode = "c"
+ array_dtype = None
+
+ shape_type = PyrexTypes.c_py_ssize_t_type
+
+ def analyse_types(self, env):
+ from . import MemoryView
+
+ self.operand = self.operand.analyse_types(env)
+ if self.array_dtype:
+ array_dtype = self.array_dtype
+ else:
+ array_dtype = self.base_type_node.base_type_node.analyse(env)
+ axes = self.base_type_node.axes
+
+ self.type = error_type
+ self.shapes = []
+ ndim = len(axes)
+
+ # Base type of the pointer or C array we are converting
+ base_type = self.operand.type
+
+ if not self.operand.type.is_ptr and not self.operand.type.is_array:
+ error(self.operand.pos, ERR_NOT_POINTER)
+ return self
+
+ # Dimension sizes of C array
+ array_dimension_sizes = []
+ if base_type.is_array:
+ while base_type.is_array:
+ array_dimension_sizes.append(base_type.size)
+ base_type = base_type.base_type
+ elif base_type.is_ptr:
+ base_type = base_type.base_type
+ else:
+ error(self.pos, "unexpected base type %s found" % base_type)
+ return self
+
+ if not (base_type.same_as(array_dtype) or base_type.is_void):
+ error(self.operand.pos, ERR_BASE_TYPE)
+ return self
+ elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
+ error(self.operand.pos,
+ "Expected %d dimensions, array has %d dimensions" %
+ (ndim, len(array_dimension_sizes)))
+ return self
+
+ # Verify the start, stop and step values
+ # In case of a C array, use the size of C array in each dimension to
+ # get an automatic cast
+ for axis_no, axis in enumerate(axes):
+ if not axis.start.is_none:
+ error(axis.start.pos, ERR_START)
+ return self
+
+ if axis.stop.is_none:
+ if array_dimension_sizes:
+ dimsize = array_dimension_sizes[axis_no]
+ axis.stop = IntNode(self.pos, value=str(dimsize),
+ constant_result=dimsize,
+ type=PyrexTypes.c_int_type)
+ else:
+ error(axis.pos, ERR_NOT_STOP)
+ return self
+
+ axis.stop = axis.stop.analyse_types(env)
+ shape = axis.stop.coerce_to(self.shape_type, env)
+ if not shape.is_literal:
+ shape.coerce_to_temp(env)
+
+ self.shapes.append(shape)
+
+ first_or_last = axis_no in (0, ndim - 1)
+ if not axis.step.is_none and first_or_last:
+ # '1' in the first or last dimension denotes F or C contiguity
+ axis.step = axis.step.analyse_types(env)
+ if (not axis.step.type.is_int and axis.step.is_literal and not
+ axis.step.type.is_error):
+ error(axis.step.pos, "Expected an integer literal")
+ return self
+
+ if axis.step.compile_time_value(env) != 1:
+ error(axis.step.pos, ERR_STEPS)
+ return self
+
+ if axis_no == 0:
+ self.mode = "fortran"
+
+ elif not axis.step.is_none and not first_or_last:
+ # step provided in some other dimension
+ error(axis.step.pos, ERR_STEPS)
+ return self
+
+ if not self.operand.is_name:
+ self.operand = self.operand.coerce_to_temp(env)
+
+ axes = [('direct', 'follow')] * len(axes)
+ if self.mode == "fortran":
+ axes[0] = ('direct', 'contig')
+ else:
+ axes[-1] = ('direct', 'contig')
+
+ self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
+ self.coercion_type.validate_memslice_dtype(self.pos)
+ self.type = self.get_cython_array_type(env)
+ MemoryView.use_cython_array_utility_code(env)
+ env.use_utility_code(MemoryView.typeinfo_to_format_code)
+ return self
+
+ def allocate_temp_result(self, code):
+ if self.temp_code:
+ raise RuntimeError("temp allocated multiple times")
+
+ self.temp_code = code.funcstate.allocate_temp(self.type, True)
+
+ def infer_type(self, env):
+ return self.get_cython_array_type(env)
+
+ def get_cython_array_type(self, env):
+ cython_scope = env.global_scope().context.cython_scope
+ cython_scope.load_cythonscope()
+ return cython_scope.viewscope.lookup("array").type
+
+ def generate_result_code(self, code):
+ from . import Buffer
+
+ shapes = [self.shape_type.cast_code(shape.result())
+ for shape in self.shapes]
+ dtype = self.coercion_type.dtype
+
+ shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
+ format_temp = code.funcstate.allocate_temp(py_object_type, True)
+
+ itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
+ type_info = Buffer.get_type_information_cname(code, dtype)
+
+ if self.operand.type.is_ptr:
+ code.putln("if (!%s) {" % self.operand.result())
+            code.putln('PyErr_SetString(PyExc_ValueError,'
+                       '"Cannot create cython.array from NULL pointer");')
+ code.putln(code.error_goto(self.operand.pos))
+ code.putln("}")
+
+ code.putln("%s = __pyx_format_from_typeinfo(&%s); %s" % (
+ format_temp,
+ type_info,
+ code.error_goto_if_null(format_temp, self.pos),
+ ))
+ code.put_gotref(format_temp)
+
+ buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
+ code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s); %s' % (
+ shapes_temp,
+ buildvalue_fmt,
+ ", ".join(shapes),
+ code.error_goto_if_null(shapes_temp, self.pos),
+ ))
+ code.put_gotref(shapes_temp)
+
+ tup = (self.result(), shapes_temp, itemsize, format_temp,
+ self.mode, self.operand.result())
+ code.putln('%s = __pyx_array_new('
+ '%s, %s, PyBytes_AS_STRING(%s), '
+ '(char *) "%s", (char *) %s);' % tup)
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ code.put_gotref(self.result())
+
+ def dispose(temp):
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
+
+ dispose(shapes_temp)
+ dispose(format_temp)
+
+ @classmethod
+ def from_carray(cls, src_node, env):
+ """
+ Given a C array type, return a CythonArrayNode
+ """
+ pos = src_node.pos
+ base_type = src_node.type
+
+ none_node = NoneNode(pos)
+ axes = []
+
+ while base_type.is_array:
+ axes.append(SliceNode(pos, start=none_node, stop=none_node,
+ step=none_node))
+ base_type = base_type.base_type
+ axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
+
+ memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
+ base_type_node=base_type)
+ result = CythonArrayNode(pos, base_type_node=memslicenode,
+ operand=src_node, array_dtype=base_type)
+ result = result.analyse_types(env)
+ return result
+
+
+class SizeofNode(ExprNode):
+ # Abstract base class for sizeof(x) expression nodes.
+
+ type = PyrexTypes.c_size_t_type
+
+ def check_const(self):
+ return True
+
+ def generate_result_code(self, code):
+ pass
+
+
+class SizeofTypeNode(SizeofNode):
+ # C sizeof function applied to a type
+ #
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+
+ subexprs = []
+ arg_type = None
+
+ def analyse_types(self, env):
+ # we may have incorrectly interpreted a dotted name as a type rather than an attribute
+ # this could be better handled by more uniformly treating types as runtime-available objects
+ if 0 and self.base_type.module_path:
+ path = self.base_type.module_path
+ obj = env.lookup(path[0])
+ if obj.as_module is None:
+ operand = NameNode(pos=self.pos, name=path[0])
+ for attr in path[1:]:
+ operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
+ operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
+ node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
+ return node
+ if self.arg_type is None:
+ base_type = self.base_type.analyse(env)
+ _, arg_type = self.declarator.analyse(base_type, env)
+ self.arg_type = arg_type
+ self.check_type()
+ return self
+
+ def check_type(self):
+ arg_type = self.arg_type
+ if not arg_type:
+ return
+ if arg_type.is_pyobject and not arg_type.is_extension_type:
+ error(self.pos, "Cannot take sizeof Python object")
+ elif arg_type.is_void:
+ error(self.pos, "Cannot take sizeof void")
+ elif not arg_type.is_complete():
+ error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
+
+ def calculate_result_code(self):
+ if self.arg_type.is_extension_type:
+ # the size of the pointer is boring
+ # we want the size of the actual struct
+ arg_code = self.arg_type.declaration_code("", deref=1)
+ else:
+ arg_code = self.arg_type.empty_declaration_code()
+ return "(sizeof(%s))" % arg_code
+
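+# Example (illustrative): "sizeof(double)" in Cython is emitted as
+# "(sizeof(double))" in C; for an extension type E the dereferenced
+# declaration is used, producing "(sizeof(struct __pyx_obj_E))"-style code
+# that measures the object struct rather than a PyObject pointer.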
+
+class SizeofVarNode(SizeofNode):
+ # C sizeof function applied to a variable
+ #
+ # operand ExprNode
+
+ subexprs = ['operand']
+
+ def analyse_types(self, env):
+ # We may actually be looking at a type rather than a variable...
+ # If we are, traditional analysis would fail...
+ operand_as_type = self.operand.analyse_as_type(env)
+ if operand_as_type:
+ self.arg_type = operand_as_type
+ if self.arg_type.is_fused:
+ self.arg_type = self.arg_type.specialize(env.fused_to_specific)
+ self.__class__ = SizeofTypeNode
+ self.check_type()
+ else:
+ self.operand = self.operand.analyse_types(env)
+ return self
+
+ def calculate_result_code(self):
+ return "(sizeof(%s))" % self.operand.result()
+
+ def generate_result_code(self, code):
+ pass
+
+
+class TypeidNode(ExprNode):
+ # C++ typeid operator applied to a type or variable
+ #
+ # operand ExprNode
+ # arg_type ExprNode
+ # is_variable boolean
+
+ type = PyrexTypes.error_type
+
+ subexprs = ['operand']
+
+ arg_type = None
+ is_variable = None
+ is_temp = 1
+
+ def get_type_info_type(self, env):
+ env_module = env
+ while not env_module.is_module_scope:
+ env_module = env_module.outer_scope
+ typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
+ typeinfo_entry = typeinfo_module.lookup('type_info')
+ return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type))
+
+ cpp_message = 'typeid operator'
+
+ def analyse_types(self, env):
+ self.cpp_check(env)
+ type_info = self.get_type_info_type(env)
+ if not type_info:
+ self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
+ return self
+ self.type = type_info
+ as_type = self.operand.analyse_as_type(env)
+ if as_type:
+ self.arg_type = as_type
+ self.is_type = True
+ else:
+ self.arg_type = self.operand.analyse_types(env)
+ self.is_type = False
+ if self.arg_type.type.is_pyobject:
+ self.error("Cannot use typeid on a Python object")
+ return self
+ elif self.arg_type.type.is_void:
+ self.error("Cannot use typeid on void")
+ return self
+ elif not self.arg_type.type.is_complete():
+ self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
+ return self
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ return self
+
+ def error(self, mess):
+ error(self.pos, mess)
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+
+ def check_const(self):
+ return True
+
+ def calculate_result_code(self):
+ return self.temp_code
+
+ def generate_result_code(self, code):
+ if self.is_type:
+ arg_code = self.arg_type.empty_declaration_code()
+ else:
+ arg_code = self.arg_type.result()
+ translate_cpp_exception(code, self.pos,
+ "%s = typeid(%s);" % (self.temp_code, arg_code),
+ None, None, self.in_nogil_context)
+
+
+class TypeofNode(ExprNode):
+ # Compile-time type of an expression, as a string.
+ #
+ # operand ExprNode
+ # literal StringNode # internal
+
+ literal = None
+ type = py_object_type
+
+ subexprs = ['literal'] # 'operand' will be ignored after type analysis!
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
+ literal = StringNode(self.pos, value=value)
+ literal = literal.analyse_types(env)
+ self.literal = literal.coerce_to_pyobject(env)
+ return self
+
+ def analyse_as_type(self, env):
+ self.operand = self.operand.analyse_types(env)
+ return self.operand.type
+
+ def may_be_none(self):
+ return False
+
+ def generate_evaluation_code(self, code):
+ self.literal.generate_evaluation_code(code)
+
+ def calculate_result_code(self):
+ return self.literal.calculate_result_code()
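+
+# Example (illustrative): cython.typeof(i) for "cdef int i" is resolved here
+# at compile time to the string constant "int"; no runtime inspection takes
+# place, which is why 'operand' is excluded from subexprs after analysis.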
+
+#-------------------------------------------------------------------
+#
+# Binary operator nodes
+#
+#-------------------------------------------------------------------
+
+try:
+ matmul_operator = operator.matmul
+except AttributeError:
+    def matmul_operator(a, b):
+        # Py2 fallback: dispatch to the bound special methods directly.
+        try:
+            func = a.__matmul__
+        except AttributeError:
+            return b.__rmatmul__(a)
+        return func(b)
+
+compile_time_binary_operators = {
+ '<': operator.lt,
+ '<=': operator.le,
+ '==': operator.eq,
+ '!=': operator.ne,
+ '>=': operator.ge,
+ '>': operator.gt,
+ 'is': operator.is_,
+ 'is_not': operator.is_not,
+ '+': operator.add,
+ '&': operator.and_,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '<<': operator.lshift,
+ '%': operator.mod,
+ '*': operator.mul,
+ '|': operator.or_,
+ '**': operator.pow,
+ '>>': operator.rshift,
+ '-': operator.sub,
+ '^': operator.xor,
+ '@': matmul_operator,
+ 'in': lambda x, seq: x in seq,
+ 'not_in': lambda x, seq: x not in seq,
+}
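+
+# Example (illustrative): these operators back compile-time (DEF) constant
+# folding, e.g. "DEF BUF_SIZE = 4 * 1024" is evaluated by applying
+# compile_time_binary_operators['*'] to the two literal values.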
+
+def get_compile_time_binop(node):
+ func = compile_time_binary_operators.get(node.operator)
+ if not func:
+ error(node.pos,
+ "Binary '%s' not supported in compile-time expression"
+ % node.operator)
+ return func
+
+
+class BinopNode(ExprNode):
+ # operator string
+ # operand1 ExprNode
+ # operand2 ExprNode
+ #
+ # Processing during analyse_expressions phase:
+ #
+ # analyse_c_operation
+ # Called when neither operand is a pyobject.
+ # - Check operand types and coerce if needed.
+ # - Determine result type and result code fragment.
+ # - Allocate temporary for result if needed.
+
+ subexprs = ['operand1', 'operand2']
+ inplace = False
+
+ def calculate_constant_result(self):
+ func = compile_time_binary_operators[self.operator]
+ self.constant_result = func(
+ self.operand1.constant_result,
+ self.operand2.constant_result)
+
+ def compile_time_value(self, denv):
+ func = get_compile_time_binop(self)
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ return func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_type(self, env):
+ return self.result_type(self.operand1.infer_type(env),
+ self.operand2.infer_type(env), env)
+
+ def analyse_types(self, env):
+ self.operand1 = self.operand1.analyse_types(env)
+ self.operand2 = self.operand2.analyse_types(env)
+ self.analyse_operation(env)
+ return self
+
+ def analyse_operation(self, env):
+ if self.is_pythran_operation(env):
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type, env)
+ assert self.type.is_pythran_expr
+ self.is_temp = 1
+ elif self.is_py_operation():
+ self.coerce_operands_to_pyobjects(env)
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type, env)
+ assert self.type.is_pyobject
+ self.is_temp = 1
+ elif self.is_cpp_operation():
+ self.analyse_cpp_operation(env)
+ else:
+ self.analyse_c_operation(env)
+
+ def is_py_operation(self):
+ return self.is_py_operation_types(self.operand1.type, self.operand2.type)
+
+ def is_py_operation_types(self, type1, type2):
+ return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
+
+ def is_pythran_operation(self, env):
+ return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
+
+ def is_pythran_operation_types(self, type1, type2, env):
+ # Support only expr op supported_type, or supported_type op expr
+ return has_np_pythran(env) and \
+ (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
+ (is_pythran_expr(type1) or is_pythran_expr(type2))
+
+ def is_cpp_operation(self):
+ return (self.operand1.type.is_cpp_class
+ or self.operand2.type.is_cpp_class)
+
+ def analyse_cpp_operation(self, env):
+ entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
+ if not entry:
+ self.type_error()
+ return
+ func_type = entry.type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check == '+':
+ # Used by NumBinopNodes to break up expressions involving multiple
+ # operators so that exceptions can be handled properly.
+ self.is_temp = 1
+ if self.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ if len(func_type.args) == 1:
+ self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
+ else:
+ self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
+ self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
+ self.type = func_type.return_type
+
+ def result_type(self, type1, type2, env):
+ if self.is_pythran_operation_types(type1, type2, env):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ if self.is_py_operation_types(type1, type2):
+ if type2.is_string:
+ type2 = Builtin.bytes_type
+ elif type2.is_pyunicode_ptr:
+ type2 = Builtin.unicode_type
+ if type1.is_string:
+ type1 = Builtin.bytes_type
+ elif type1.is_pyunicode_ptr:
+ type1 = Builtin.unicode_type
+ if type1.is_builtin_type or type2.is_builtin_type:
+ if type1 is type2 and self.operator in '**%+|&^':
+ # FIXME: at least these operators should be safe - others?
+ return type1
+ result_type = self.infer_builtin_types_operation(type1, type2)
+ if result_type is not None:
+ return result_type
+ return py_object_type
+ elif type1.is_error or type2.is_error:
+ return PyrexTypes.error_type
+ else:
+ return self.compute_c_result_type(type1, type2)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ return None
+
+ def nogil_check(self, env):
+ if self.is_py_operation():
+ self.gil_error()
+
+ def coerce_operands_to_pyobjects(self, env):
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+
+ def check_const(self):
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def is_ephemeral(self):
+ return (super(BinopNode, self).is_ephemeral() or
+ self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
+
+ def generate_result_code(self, code):
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran binop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ if self.operator == '**':
+ code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % (
+ self.result(),
+ self.result(),
+ self.operand1.pythran_result(),
+ self.operand2.pythran_result()))
+ else:
+ code.putln("new (&%s) decltype(%s){%s %s %s};" % (
+ self.result(),
+ self.result(),
+ self.operand1.pythran_result(),
+ self.operator,
+ self.operand2.pythran_result()))
+ elif self.operand1.type.is_pyobject:
+ function = self.py_operation_function(code)
+ if self.operator == '**':
+ extra_args = ", Py_None"
+ else:
+ extra_args = ""
+ code.putln(
+ "%s = %s(%s, %s%s); %s" % (
+ self.result(),
+ function,
+ self.operand1.py_result(),
+ self.operand2.py_result(),
+ extra_args,
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.py_result())
+ elif self.is_temp:
+ # C++ overloaded operators with exception values are currently all
+ # handled through temporaries.
+ if self.is_cpp_operation() and self.exception_check == '+':
+ translate_cpp_exception(code, self.pos,
+ "%s = %s;" % (self.result(), self.calculate_result_code()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
+
+ def type_error(self):
+ if not (self.operand1.type.is_error
+ or self.operand2.type.is_error):
+ error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
+ (self.operator, self.operand1.type,
+ self.operand2.type))
+ self.type = PyrexTypes.error_type
+
+
+class CBinopNode(BinopNode):
+
+ def analyse_types(self, env):
+ node = BinopNode.analyse_types(self, env)
+ if node.is_py_operation():
+ node.type = PyrexTypes.error_type
+ return node
+
+ def py_operation_function(self, code):
+ return ""
+
+ def calculate_result_code(self):
+ return "(%s %s %s)" % (
+ self.operand1.result(),
+ self.operator,
+ self.operand2.result())
+
+ def compute_c_result_type(self, type1, type2):
+ cpp_type = None
+ if type1.is_cpp_class or type1.is_ptr:
+ cpp_type = type1.find_cpp_operation_type(self.operator, type2)
+ if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
+ cpp_type = type2.find_cpp_operation_type(self.operator, type1)
+ # FIXME: do we need to handle other cases here?
+ return cpp_type
+
+
+def c_binop_constructor(operator):
+ def make_binop_node(pos, **operands):
+ return CBinopNode(pos, operator=operator, **operands)
+ return make_binop_node
+
+
+class NumBinopNode(BinopNode):
+ # Binary operation taking numeric arguments.
+
+ infix = True
+ overflow_check = False
+ overflow_bit_node = None
+
+ def analyse_c_operation(self, env):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ self.type = self.compute_c_result_type(type1, type2)
+ if not self.type:
+ self.type_error()
+ return
+ if self.type.is_complex:
+ self.infix = False
+ if (self.type.is_int
+ and env.directives['overflowcheck']
+ and self.operator in self.overflow_op_names):
+ if (self.operator in ('+', '*')
+ and self.operand1.has_constant_result()
+ and not self.operand2.has_constant_result()):
+ self.operand1, self.operand2 = self.operand2, self.operand1
+ self.overflow_check = True
+ self.overflow_fold = env.directives['overflowcheck.fold']
+ self.func = self.type.overflow_check_binop(
+ self.overflow_op_names[self.operator],
+ env,
+ const_rhs = self.operand2.has_constant_result())
+ self.is_temp = True
+ if not self.infix or (type1.is_numeric and type2.is_numeric):
+ self.operand1 = self.operand1.coerce_to(self.type, env)
+ self.operand2 = self.operand2.coerce_to(self.type, env)
+
+ def compute_c_result_type(self, type1, type2):
+ if self.c_types_okay(type1, type2):
+ widest_type = PyrexTypes.widest_numeric_type(type1, type2)
+ if widest_type is PyrexTypes.c_bint_type:
+ if self.operator not in '|^&':
+ # False + False == 0 # not False!
+ widest_type = PyrexTypes.c_int_type
+ else:
+ widest_type = PyrexTypes.widest_numeric_type(
+ widest_type, PyrexTypes.c_int_type)
+ return widest_type
+ else:
+ return None
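+
+    # Example (illustrative): for "cdef bint a, b", the C result type of
+    # "a + b" is widened to int so that True + True == 2 instead of being
+    # truncated back into bint; bitwise operators like "a & b" stay bint.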
+
+ def may_be_none(self):
+ if self.type and self.type.is_builtin_type:
+ # if we know the result type, we know the operation, so it can't be None
+ return False
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
+ # XXX: I can't think of any case where a binary operation
+ # on builtin types evaluates to None - add a special case
+ # here if there is one.
+ return False
+ return super(NumBinopNode, self).may_be_none()
+
+ def get_constant_c_result_code(self):
+ value1 = self.operand1.get_constant_c_result_code()
+ value2 = self.operand2.get_constant_c_result_code()
+ if value1 and value2:
+ return "(%s %s %s)" % (value1, self.operator, value2)
+ else:
+ return None
+
+ def c_types_okay(self, type1, type2):
+ #print "NumBinopNode.c_types_okay:", type1, type2 ###
+ return (type1.is_numeric or type1.is_enum) \
+ and (type2.is_numeric or type2.is_enum)
+
+ def generate_evaluation_code(self, code):
+ if self.overflow_check:
+ self.overflow_bit_node = self
+ self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = 0;" % self.overflow_bit)
+ super(NumBinopNode, self).generate_evaluation_code(code)
+ if self.overflow_check:
+ code.putln("if (unlikely(%s)) {" % self.overflow_bit)
+ code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(self.overflow_bit)
+
+ def calculate_result_code(self):
+ if self.overflow_bit_node is not None:
+ return "%s(%s, %s, &%s)" % (
+ self.func,
+ self.operand1.result(),
+ self.operand2.result(),
+ self.overflow_bit_node.overflow_bit)
+ elif self.type.is_cpp_class or self.infix:
+ if is_pythran_expr(self.type):
+ result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
+ else:
+ result1, result2 = self.operand1.result(), self.operand2.result()
+ return "(%s %s %s)" % (result1, self.operator, result2)
+ else:
+ func = self.type.binary_op(self.operator)
+ if func is None:
+ error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
+ return "%s(%s, %s)" % (
+ func,
+ self.operand1.result(),
+ self.operand2.result())
+
+ def is_py_operation_types(self, type1, type2):
+ return (type1.is_unicode_char or
+ type2.is_unicode_char or
+ BinopNode.is_py_operation_types(self, type1, type2))
+
+ def py_operation_function(self, code):
+ function_name = self.py_functions[self.operator]
+ if self.inplace:
+ function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
+ return function_name
+
+ py_functions = {
+ "|": "PyNumber_Or",
+ "^": "PyNumber_Xor",
+ "&": "PyNumber_And",
+ "<<": "PyNumber_Lshift",
+ ">>": "PyNumber_Rshift",
+ "+": "PyNumber_Add",
+ "-": "PyNumber_Subtract",
+ "*": "PyNumber_Multiply",
+ "@": "__Pyx_PyNumber_MatrixMultiply",
+ "/": "__Pyx_PyNumber_Divide",
+ "//": "PyNumber_FloorDivide",
+ "%": "PyNumber_Remainder",
+ "**": "PyNumber_Power",
+ }
+
+ overflow_op_names = {
+ "+": "add",
+ "-": "sub",
+ "*": "mul",
+ "<<": "lshift",
+ }
+
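+# Example (illustrative): with "# cython: overflowcheck=True", an int
+# expression like "a * b" is routed through a "__Pyx_mul_int"-style helper
+# chosen in analyse_c_operation() above; the helper sets the shared overflow
+# bit, which generate_evaluation_code() tests before raising OverflowError.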
+
+class IntBinopNode(NumBinopNode):
+ # Binary operation taking integer arguments.
+
+ def c_types_okay(self, type1, type2):
+ #print "IntBinopNode.c_types_okay:", type1, type2 ###
+ return (type1.is_int or type1.is_enum) \
+ and (type2.is_int or type2.is_enum)
+
+
+class AddNode(NumBinopNode):
+ # '+' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
+ return 1
+ else:
+ return NumBinopNode.is_py_operation_types(self, type1, type2)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ # b'abc' + 'abc' raises an exception in Py3,
+ # so we can safely infer the Py2 type for bytes here
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ if type1 in string_types and type2 in string_types:
+ return string_types[max(string_types.index(type1),
+ string_types.index(type2))]
+ return None
+
+ def compute_c_result_type(self, type1, type2):
+ #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
+ if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
+ return type1
+ elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
+ return type2
+ else:
+ return NumBinopNode.compute_c_result_type(
+ self, type1, type2)
+
+ def py_operation_function(self, code):
+ type1, type2 = self.operand1.type, self.operand2.type
+
+ if type1 is unicode_type or type2 is unicode_type:
+ if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type):
+ is_unicode_concat = True
+ elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode):
+ # Assume that even if we don't know the second type, it's going to be a string.
+ is_unicode_concat = True
+ else:
+ # Operation depends on the second type.
+ is_unicode_concat = False
+
+ if is_unicode_concat:
+ if self.operand1.may_be_none() or self.operand2.may_be_none():
+ return '__Pyx_PyUnicode_ConcatSafe'
+ else:
+ return '__Pyx_PyUnicode_Concat'
+
+ return super(AddNode, self).py_operation_function(code)
+
+
+class SubNode(NumBinopNode):
+ # '-' operator.
+
+ def compute_c_result_type(self, type1, type2):
+ if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
+ return type1
+ elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
+ return PyrexTypes.c_ptrdiff_t_type
+ else:
+ return NumBinopNode.compute_c_result_type(
+ self, type1, type2)
+
+
+class MulNode(NumBinopNode):
+ # '*' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ if ((type1.is_string and type2.is_int) or
+ (type2.is_string and type1.is_int)):
+ return 1
+ else:
+ return NumBinopNode.is_py_operation_types(self, type1, type2)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ # let's assume that whatever builtin type you multiply a string with
+ # will either return a string of the same type or fail with an exception
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ if type1 in string_types and type2.is_builtin_type:
+ return type1
+ if type2 in string_types and type1.is_builtin_type:
+ return type2
+ # multiplication of containers/numbers with an integer value
+ # always (?) returns the same type
+ if type1.is_int:
+ return type2
+ if type2.is_int:
+ return type1
+ return None
+
+
+class MatMultNode(NumBinopNode):
+ # '@' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ return True
+
+ def generate_evaluation_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
+ super(MatMultNode, self).generate_evaluation_code(code)
+
+
+class DivNode(NumBinopNode):
+ # '/' or '//' operator.
+
+ cdivision = None
+ truedivision = None # == "unknown" if operator == '/'
+ ctruedivision = False
+ cdivision_warnings = False
+ zerodivision_check = None
+
+ def find_compile_time_binary_operator(self, op1, op2):
+ func = compile_time_binary_operators[self.operator]
+ if self.operator == '/' and self.truedivision is None:
+ # => true div for floats, floor div for integers
+ if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
+ func = compile_time_binary_operators['//']
+ return func
+
+ def calculate_constant_result(self):
+ op1 = self.operand1.constant_result
+ op2 = self.operand2.constant_result
+ func = self.find_compile_time_binary_operator(op1, op2)
+ self.constant_result = func(
+ self.operand1.constant_result,
+ self.operand2.constant_result)
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ func = self.find_compile_time_binary_operator(
+ operand1, operand2)
+ return func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def _check_truedivision(self, env):
+ if self.cdivision or env.directives['cdivision']:
+ self.ctruedivision = False
+ else:
+ self.ctruedivision = self.truedivision
+
+ def infer_type(self, env):
+ self._check_truedivision(env)
+ return self.result_type(
+ self.operand1.infer_type(env),
+ self.operand2.infer_type(env), env)
+
+ def analyse_operation(self, env):
+ self._check_truedivision(env)
+ NumBinopNode.analyse_operation(self, env)
+ if self.is_cpp_operation():
+ self.cdivision = True
+ if not self.type.is_pyobject:
+ self.zerodivision_check = (
+ self.cdivision is None and not env.directives['cdivision']
+ and (not self.operand2.has_constant_result() or
+ self.operand2.constant_result == 0))
+ if self.zerodivision_check or env.directives['cdivision_warnings']:
+ # Need to check ahead of time to warn or raise zero division error
+ self.operand1 = self.operand1.coerce_to_simple(env)
+ self.operand2 = self.operand2.coerce_to_simple(env)
+
+ def compute_c_result_type(self, type1, type2):
+ if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class:
+ if not type1.is_float and not type2.is_float:
+ widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
+ widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
+ return widest_type
+ return NumBinopNode.compute_c_result_type(self, type1, type2)
+
+ def zero_division_message(self):
+ if self.type.is_int:
+ return "integer division or modulo by zero"
+ else:
+ return "float division"
+
+ def generate_evaluation_code(self, code):
+ if not self.type.is_pyobject and not self.type.is_complex:
+ if self.cdivision is None:
+ self.cdivision = (
+ code.globalstate.directives['cdivision']
+ or self.type.is_float
+ or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed)
+ )
+ if not self.cdivision:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
+ NumBinopNode.generate_evaluation_code(self, code)
+ self.generate_div_warning_code(code)
+
+ def generate_div_warning_code(self, code):
+ in_nogil = self.in_nogil_context
+ if not self.type.is_pyobject:
+ if self.zerodivision_check:
+ if not self.infix:
+ zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
+ else:
+ zero_test = "%s == 0" % self.operand2.result()
+ code.putln("if (unlikely(%s)) {" % zero_test)
+ if in_nogil:
+ code.put_ensure_gil()
+ code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
+ if in_nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ if self.type.is_int and self.type.signed and self.operator != '%':
+ code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
+ if self.operand2.type.signed == 2:
+ # explicitly signed, no runtime check needed
+ minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
+ else:
+ type_of_op2 = self.operand2.type.empty_declaration_code()
+ minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
+ type_of_op2, self.operand2.result(), type_of_op2)
+ code.putln("else if (sizeof(%s) == sizeof(long) && %s "
+ " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
+ self.type.empty_declaration_code(),
+ minus1_check,
+ self.operand1.result()))
+ if in_nogil:
+ code.put_ensure_gil()
+ code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
+ if in_nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
+ code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
+ self.operand1.result(),
+ self.operand2.result()))
+ warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
+ 'FILENAME': Naming.filename_cname,
+ 'LINENO': Naming.lineno_cname,
+ }
+
+ if in_nogil:
+ result_code = 'result'
+ code.putln("int %s;" % result_code)
+ code.put_ensure_gil()
+ code.putln(code.set_error_info(self.pos, used=True))
+ code.putln("%s = %s;" % (result_code, warning_code))
+ code.put_release_ensured_gil()
+ else:
+ result_code = warning_code
+ code.putln(code.set_error_info(self.pos, used=True))
+
+ code.put("if (unlikely(%s)) " % result_code)
+ code.put_goto(code.error_label)
+ code.putln("}")
+
+ def calculate_result_code(self):
+ if self.type.is_complex or self.is_cpp_operation():
+ return NumBinopNode.calculate_result_code(self)
+ elif self.type.is_float and self.operator == '//':
+ return "floor(%s / %s)" % (
+ self.operand1.result(),
+ self.operand2.result())
+ elif self.truedivision or self.cdivision:
+ op1 = self.operand1.result()
+ op2 = self.operand2.result()
+ if self.truedivision:
+ if self.type != self.operand1.type:
+ op1 = self.type.cast_code(op1)
+ if self.type != self.operand2.type:
+ op2 = self.type.cast_code(op2)
+ return "(%s / %s)" % (op1, op2)
+ else:
+ return "__Pyx_div_%s(%s, %s)" % (
+ self.type.specialization_name(),
+ self.operand1.result(),
+ self.operand2.result())
+
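+# Background (illustrative): the __Pyx_div_* helper exists because C integer
+# division truncates towards zero while Python floors the result: -7 // 2 is
+# -4 in Python, but -7 / 2 is -3 in C.  With the 'cdivision' directive
+# enabled, the plain C operator is emitted instead.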
+
+_find_formatting_types = re.compile(
+ br"%"
+ br"(?:%|" # %%
+ br"(?:\([^)]+\))?" # %(name)
+ br"[-+#,0-9 ]*([a-z])" # %.2f etc.
+ br")").findall
+
+# These format conversion types can never trigger a Unicode string conversion in Py2.
+_safe_bytes_formats = set([
+ # Excludes 's' and 'r', which can generate non-bytes strings.
+ b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
+])
+
+
+class ModNode(DivNode):
+ # '%' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ return (type1.is_string
+ or type2.is_string
+ or NumBinopNode.is_py_operation_types(self, type1, type2))
+
+ def infer_builtin_types_operation(self, type1, type2):
+        # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3 versions.
+        if type1 is unicode_type:
+            # None % xyz may be implemented by the RHS
+ if type2.is_builtin_type or not self.operand1.may_be_none():
+ return type1
+ elif type1 in (bytes_type, str_type, basestring_type):
+ if type2 is unicode_type:
+ return type2
+ elif type2.is_numeric:
+ return type1
+ elif self.operand1.is_string_literal:
+ if type1 is str_type or type1 is bytes_type:
+ if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
+ return type1
+ return basestring_type
+ elif type1 is bytes_type and not type2.is_builtin_type:
+            return None  # RHS might implement the '%' operator differently in Py3
+ else:
+ return basestring_type # either str or unicode, can't tell
+ return None
+
+ def zero_division_message(self):
+ if self.type.is_int:
+ return "integer division or modulo by zero"
+ else:
+ return "float divmod()"
+
+ def analyse_operation(self, env):
+ DivNode.analyse_operation(self, env)
+ if not self.type.is_pyobject:
+ if self.cdivision is None:
+ self.cdivision = env.directives['cdivision'] or not self.type.signed
+ if not self.cdivision and not self.type.is_int and not self.type.is_float:
+ error(self.pos, "mod operator not supported for type '%s'" % self.type)
+
+ def generate_evaluation_code(self, code):
+ if not self.type.is_pyobject and not self.cdivision:
+ if self.type.is_int:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
+ else: # float
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
+ self.type, math_h_modifier=self.type.math_h_modifier))
+ # NOTE: skipping over DivNode here
+ NumBinopNode.generate_evaluation_code(self, code)
+ self.generate_div_warning_code(code)
+
+ def calculate_result_code(self):
+ if self.cdivision:
+ if self.type.is_float:
+ return "fmod%s(%s, %s)" % (
+ self.type.math_h_modifier,
+ self.operand1.result(),
+ self.operand2.result())
+ else:
+ return "(%s %% %s)" % (
+ self.operand1.result(),
+ self.operand2.result())
+ else:
+ return "__Pyx_mod_%s(%s, %s)" % (
+ self.type.specialization_name(),
+ self.operand1.result(),
+ self.operand2.result())
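+
+    # Example (illustrative): the __Pyx_mod_* helper is needed because the
+    # sign of the C '%' result follows the dividend while Python's follows
+    # the divisor: -7 % 3 is 2 in Python but -1 in C.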
+
+ def py_operation_function(self, code):
+ type1, type2 = self.operand1.type, self.operand2.type
+ # ("..." % x) must call "x.__rmod__()" for string subtypes.
+ if type1 is unicode_type:
+ if self.operand1.may_be_none() or (
+ type2.is_extension_type and type2.subtype_of(type1) or
+ type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+ return '__Pyx_PyUnicode_FormatSafe'
+ else:
+ return 'PyUnicode_Format'
+ elif type1 is str_type:
+ if self.operand1.may_be_none() or (
+ type2.is_extension_type and type2.subtype_of(type1) or
+ type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+ return '__Pyx_PyString_FormatSafe'
+ else:
+ return '__Pyx_PyString_Format'
+ return super(ModNode, self).py_operation_function(code)
+
+
+class PowNode(NumBinopNode):
+ # '**' operator.
+
+ def analyse_types(self, env):
+ if not env.directives['cpow']:
+ # Note - the check here won't catch cpow directives that don't use '**'
+ # but that's probably OK for a placeholder forward compatibility directive
+ error(self.pos, "The 'cpow' directive is provided for forward compatibility "
+ "and must be True")
+ return super(PowNode, self).analyse_types(env)
+
+ def analyse_c_operation(self, env):
+ NumBinopNode.analyse_c_operation(self, env)
+ if self.type.is_complex:
+ if self.type.real_type.is_float:
+ self.operand1 = self.operand1.coerce_to(self.type, env)
+ self.operand2 = self.operand2.coerce_to(self.type, env)
+ self.pow_func = self.type.binary_op('**')
+ else:
+ error(self.pos, "complex int powers not supported")
+ self.pow_func = "<error>"
+ elif self.type.is_float:
+ self.pow_func = "pow" + self.type.math_h_modifier
+ elif self.type.is_int:
+ self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
+ env.use_utility_code(
+ UtilityCode.load_cached("IntPow", "CMath.c").specialize(
+ func_name=self.pow_func,
+ type=self.type.empty_declaration_code(),
+ signed=self.type.signed and 1 or 0))
+ elif not self.type.is_error:
+ error(self.pos, "got unexpected types for C power operator: %s, %s" %
+ (self.operand1.type, self.operand2.type))
+
+ def calculate_result_code(self):
+ # Work around MSVC overloading ambiguity.
+ def typecast(operand):
+ if self.type == operand.type:
+ return operand.result()
+ else:
+ return self.type.cast_code(operand.result())
+ return "%s(%s, %s)" % (
+ self.pow_func,
+ typecast(self.operand1),
+ typecast(self.operand2))
+
+ def py_operation_function(self, code):
+ if (self.type.is_pyobject and
+ self.operand1.constant_result == 2 and
+ isinstance(self.operand1.constant_result, _py_int_types) and
+ self.operand2.type is py_object_type):
+ code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
+ if self.inplace:
+ return '__Pyx_PyNumber_InPlacePowerOf2'
+ else:
+ return '__Pyx_PyNumber_PowerOf2'
+ return super(PowNode, self).py_operation_function(code)
+
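+# Example (illustrative): "2 ** n" with a Python-object exponent is routed
+# to the __Pyx_PyNumber_PowerOf2 helper named above, which is intended to
+# fast-path small integer exponents before falling back to PyNumber_Power.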
+
+class BoolBinopNode(ExprNode):
+ """
+ Short-circuiting boolean operation.
+
+ Note that this node provides the same code generation method as
+ BoolBinopResultNode to simplify expression nesting.
+
+ operator string "and"/"or"
+ operand1 BoolBinopNode/BoolBinopResultNode left operand
+ operand2 BoolBinopNode/BoolBinopResultNode right operand
+ """
+ subexprs = ['operand1', 'operand2']
+ is_temp = True
+ operator = None
+ operand1 = None
+ operand2 = None
+
+ def infer_type(self, env):
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+ return PyrexTypes.independent_spanning_type(type1, type2)
+
+ def may_be_none(self):
+ if self.operator == 'or':
+ return self.operand2.may_be_none()
+ else:
+ return self.operand1.may_be_none() or self.operand2.may_be_none()
+
+ def calculate_constant_result(self):
+ operand1 = self.operand1.constant_result
+ operand2 = self.operand2.constant_result
+ if self.operator == 'and':
+ self.constant_result = operand1 and operand2
+ else:
+ self.constant_result = operand1 or operand2
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ if self.operator == 'and':
+ return operand1 and operand2
+ else:
+ return operand1 or operand2
+
+ def is_ephemeral(self):
+ return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
+
+ def analyse_types(self, env):
+ # Note: we do not do any coercion here as we most likely do not know the final type anyway.
+ # We even accept to set self.type to ErrorType if both operands do not have a spanning type.
+ # The coercion to the final type and to a "simple" value is left to coerce_to().
+ operand1 = self.operand1.analyse_types(env)
+ operand2 = self.operand2.analyse_types(env)
+ self.type = PyrexTypes.independent_spanning_type(
+ operand1.type, operand2.type)
+ self.operand1 = self._wrap_operand(operand1, env)
+ self.operand2 = self._wrap_operand(operand2, env)
+ return self
+
+ def _wrap_operand(self, operand, env):
+ if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
+ operand = BoolBinopResultNode(operand, self.type, env)
+ return operand
+
+ def wrap_operands(self, env):
+ """
+ Must get called by transforms that want to create a correct BoolBinopNode
+ after the type analysis phase.
+ """
+ self.operand1 = self._wrap_operand(self.operand1, env)
+ self.operand2 = self._wrap_operand(self.operand2, env)
+
+ def coerce_to_boolean(self, env):
+ return self.coerce_to(PyrexTypes.c_bint_type, env)
+
+ def coerce_to(self, dst_type, env):
+ operand1 = self.operand1.coerce_to(dst_type, env)
+ operand2 = self.operand2.coerce_to(dst_type, env)
+ return BoolBinopNode.from_node(
+ self, type=dst_type,
+ operator=self.operator,
+ operand1=operand1, operand2=operand2)
+
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ code.mark_pos(self.pos)
+
+ outer_labels = (and_label, or_label)
+ if self.operator == 'and':
+ my_label = and_label = code.new_label('next_and')
+ else:
+ my_label = or_label = code.new_label('next_or')
+ self.operand1.generate_bool_evaluation_code(
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
+
+ and_label, or_label = outer_labels
+
+ code.put_label(my_label)
+ self.operand2.generate_bool_evaluation_code(
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
+
+ def generate_evaluation_code(self, code):
+ self.allocate_temp_result(code)
+ result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
+ or_label = and_label = None
+ end_label = code.new_label('bool_binop_done')
+ self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
+ code.put_label(end_label)
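+
+    # Control-flow sketch (illustrative): for "a and b", operand1's test
+    # falls through to the 'next_and' label when true, where operand2
+    # computes the final result; when false, operand1's own coerced value is
+    # stored and control jumps straight to 'bool_binop_done'.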
+
+ gil_message = "Truth-testing Python object"
+
+ def check_const(self):
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def generate_subexpr_disposal_code(self, code):
+ pass # nothing to do here, all done in generate_evaluation_code()
+
+ def free_subexpr_temps(self, code):
+ pass # nothing to do here, all done in generate_evaluation_code()
+
+ def generate_operand1_test(self, code):
+ # Generate code to test the truth of the first operand.
+ if self.type.is_pyobject:
+ test_result = code.funcstate.allocate_temp(
+ PyrexTypes.c_bint_type, manage_ref=False)
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ test_result,
+ self.operand1.py_result(),
+ code.error_goto_if_neg(test_result, self.pos)))
+ else:
+ test_result = self.operand1.result()
+ return (test_result, self.type.is_pyobject)
+
+
+class BoolBinopResultNode(ExprNode):
+ """
+ Intermediate result of a short-circuiting and/or expression.
+ Tests the result for 'truthiness' and takes care of coercing the final result
+ of the overall expression to the target type.
+
+ Note that this node provides the same code generation method as
+ BoolBinopNode to simplify expression nesting.
+
+ arg ExprNode the argument to test
+ value ExprNode the coerced result value node
+ """
+
+ subexprs = ['arg', 'value']
+ is_temp = True
+ arg = None
+ value = None
+
+ def __init__(self, arg, result_type, env):
+ # using 'arg' multiple times, so it must be a simple/temp value
+ arg = arg.coerce_to_simple(env)
+ # wrap in ProxyNode, in case a transform wants to replace self.arg later
+ arg = ProxyNode(arg)
+ super(BoolBinopResultNode, self).__init__(
+ arg.pos, arg=arg, type=result_type,
+ value=CloneNode(arg).coerce_to(result_type, env))
+
+ def coerce_to_boolean(self, env):
+ return self.coerce_to(PyrexTypes.c_bint_type, env)
+
+ def coerce_to(self, dst_type, env):
+ # unwrap, coerce, rewrap
+ arg = self.arg.arg
+ if dst_type is PyrexTypes.c_bint_type:
+ arg = arg.coerce_to_boolean(env)
+ # TODO: unwrap more coercion nodes?
+ return BoolBinopResultNode(arg, dst_type, env)
+
+ def nogil_check(self, env):
+ # let's leave all errors to BoolBinopNode
+ pass
+
+ def generate_operand_test(self, code):
+        # Generate code to test the truth of the argument.
+ if self.arg.type.is_pyobject:
+ test_result = code.funcstate.allocate_temp(
+ PyrexTypes.c_bint_type, manage_ref=False)
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ test_result,
+ self.arg.py_result(),
+ code.error_goto_if_neg(test_result, self.pos)))
+ else:
+ test_result = self.arg.result()
+ return (test_result, self.arg.type.is_pyobject)
+
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ code.mark_pos(self.pos)
+
+ # x => x
+ # x and ... or ... => next 'and' / 'or'
+ # False ... or x => next 'or'
+ # True and x => next 'and'
+ # True or x => True (operand)
+
+ self.arg.generate_evaluation_code(code)
+ if and_label or or_label:
+ test_result, uses_temp = self.generate_operand_test(code)
+ if uses_temp and (and_label and or_label):
+ # cannot become final result => free early
+ # disposal: uses_temp and (and_label and or_label)
+ self.arg.generate_disposal_code(code)
+ sense = '!' if or_label else ''
+ code.putln("if (%s%s) {" % (sense, test_result))
+ if uses_temp:
+ code.funcstate.release_temp(test_result)
+ if not uses_temp or not (and_label and or_label):
+ # disposal: (not uses_temp) or {not (and_label and or_label) [if]}
+ self.arg.generate_disposal_code(code)
+
+ if or_label and or_label != fall_through:
+ # value is false => short-circuit to next 'or'
+ code.put_goto(or_label)
+ if and_label:
+ # value is true => go to next 'and'
+ if or_label:
+ code.putln("} else {")
+ if not uses_temp:
+ # disposal: (not uses_temp) and {(and_label and or_label) [else]}
+ self.arg.generate_disposal_code(code)
+ if and_label != fall_through:
+ code.put_goto(and_label)
+
+ if not and_label or not or_label:
+ # if no next 'and' or 'or', we provide the result
+ if and_label or or_label:
+ code.putln("} else {")
+ self.value.generate_evaluation_code(code)
+ self.value.make_owned_reference(code)
+ code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
+ self.value.generate_post_assignment_code(code)
+ # disposal: {not (and_label and or_label) [else]}
+ self.arg.generate_disposal_code(code)
+ self.value.free_temps(code)
+ if end_label != fall_through:
+ code.put_goto(end_label)
+
+ if and_label or or_label:
+ code.putln("}")
+ self.arg.free_temps(code)
+
+
+class CondExprNode(ExprNode):
+ # Short-circuiting conditional expression.
+ #
+ # test ExprNode
+ # true_val ExprNode
+ # false_val ExprNode
+
+ true_val = None
+ false_val = None
+ is_temp = True
+
+ subexprs = ['test', 'true_val', 'false_val']
+
+ def type_dependencies(self, env):
+ return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
+
+ def infer_type(self, env):
+ return PyrexTypes.independent_spanning_type(
+ self.true_val.infer_type(env),
+ self.false_val.infer_type(env))
+
+ def calculate_constant_result(self):
+ if self.test.constant_result:
+ self.constant_result = self.true_val.constant_result
+ else:
+ self.constant_result = self.false_val.constant_result
+
+ def is_ephemeral(self):
+ return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
+
+ def analyse_types(self, env):
+ self.test = self.test.analyse_types(env).coerce_to_boolean(env)
+ self.true_val = self.true_val.analyse_types(env)
+ self.false_val = self.false_val.analyse_types(env)
+ return self.analyse_result_type(env)
+
+ def analyse_result_type(self, env):
+ true_val_type = self.true_val.type
+ false_val_type = self.false_val.type
+ self.type = PyrexTypes.independent_spanning_type(true_val_type, false_val_type)
+
+ if self.type.is_reference:
+ self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+ elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
+ error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
+
+ if true_val_type.is_pyobject or false_val_type.is_pyobject:
+ if true_val_type != self.type:
+ self.true_val = self.true_val.coerce_to(self.type, env)
+ if false_val_type != self.type:
+ self.false_val = self.false_val.coerce_to(self.type, env)
+
+ if self.type.is_error:
+ self.type_error()
+ return self
+
+ def coerce_to_integer(self, env):
+ if not self.true_val.type.is_int:
+ self.true_val = self.true_val.coerce_to_integer(env)
+ if not self.false_val.type.is_int:
+ self.false_val = self.false_val.coerce_to_integer(env)
+ self.result_ctype = None
+ return self.analyse_result_type(env)
+
+ def coerce_to(self, dst_type, env):
+ if self.true_val.type != dst_type:
+ self.true_val = self.true_val.coerce_to(dst_type, env)
+ if self.false_val.type != dst_type:
+ self.false_val = self.false_val.coerce_to(dst_type, env)
+ self.result_ctype = None
+ return self.analyse_result_type(env)
+
+ def type_error(self):
+ if not (self.true_val.type.is_error or self.false_val.type.is_error):
+ error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
+ (self.true_val.type, self.false_val.type))
+ self.type = PyrexTypes.error_type
+
+ def check_const(self):
+ return (self.test.check_const()
+ and self.true_val.check_const()
+ and self.false_val.check_const())
+
+ def generate_evaluation_code(self, code):
+        # Because subexprs may not be evaluated, we can use a more optimal
+        # subexpr allocation strategy than the default, so we override
+        # generate_evaluation_code() directly.
+
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+ self.test.generate_evaluation_code(code)
+ code.putln("if (%s) {" % self.test.result())
+ self.eval_and_get(code, self.true_val)
+ code.putln("} else {")
+ self.eval_and_get(code, self.false_val)
+ code.putln("}")
+ self.test.generate_disposal_code(code)
+ self.test.free_temps(code)
+
+ def eval_and_get(self, code, expr):
+ expr.generate_evaluation_code(code)
+ if self.type.is_memoryviewslice:
+ expr.make_owned_memoryviewslice(code)
+ else:
+ expr.make_owned_reference(code)
+ code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
+ expr.generate_post_assignment_code(code)
+ expr.free_temps(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ pass # done explicitly above (cleanup must separately happen within the if/else blocks)
+
+ def free_subexpr_temps(self, code):
+ pass # done explicitly above (cleanup must separately happen within the if/else blocks)
+
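+# A rough sketch of the C shape generated for `true_val if test else false_val`
+# (names hypothetical). Only the taken branch is evaluated, which is why the
+# subexpression disposal hooks above are no-ops:
+#
+#     <evaluate test>
+#     if (__pyx_t_test) {
+#         <evaluate true_val>;  __pyx_r = <true_val as result ctype>;
+#     } else {
+#         <evaluate false_val>; __pyx_r = <false_val as result ctype>;
+#     }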
+
+richcmp_constants = {
+ "<" : "Py_LT",
+ "<=": "Py_LE",
+ "==": "Py_EQ",
+ "!=": "Py_NE",
+ "<>": "Py_NE",
+ ">" : "Py_GT",
+ ">=": "Py_GE",
+ # the following are faked by special compare functions
+ "in" : "Py_EQ",
+ "not_in": "Py_NE",
+}
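+
+# Example: for an object comparison `a <= b`, generate_operation_code() below
+# emits roughly `PyObject_RichCompare(__pyx_v_a, __pyx_v_b, Py_LE)`. The
+# 'in'/'not_in' entries map to Py_EQ/Py_NE only to select the truth sense
+# passed to the special *_ContainsTF helpers; they are not real rich
+# comparisons.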
+
+class CmpNode(object):
+ # Mixin class containing code common to PrimaryCmpNodes
+ # and CascadedCmpNodes.
+
+ special_bool_cmp_function = None
+ special_bool_cmp_utility_code = None
+
+ def infer_type(self, env):
+ # TODO: Actually implement this (after merging with -unstable).
+ return py_object_type
+
+ def calculate_cascaded_constant_result(self, operand1_result):
+ func = compile_time_binary_operators[self.operator]
+ operand2_result = self.operand2.constant_result
+ if (isinstance(operand1_result, any_string_type) and
+ isinstance(operand2_result, any_string_type) and
+ type(operand1_result) != type(operand2_result)):
+ # string comparison of different types isn't portable
+ return
+
+ if self.operator in ('in', 'not_in'):
+ if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
+ if not self.operand2.args:
+ self.constant_result = self.operator == 'not_in'
+ return
+ elif isinstance(self.operand2, ListNode) and not self.cascade:
+ # tuples are more efficient to store than lists
+ self.operand2 = self.operand2.as_tuple()
+ elif isinstance(self.operand2, DictNode):
+ if not self.operand2.key_value_pairs:
+ self.constant_result = self.operator == 'not_in'
+ return
+
+ self.constant_result = func(operand1_result, operand2_result)
+
+ def cascaded_compile_time_value(self, operand1, denv):
+ func = get_compile_time_binop(self)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ result = func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+ result = None
+ if result:
+ cascade = self.cascade
+ if cascade:
+ result = result and cascade.cascaded_compile_time_value(operand2, denv)
+ return result
+
+ def is_cpp_comparison(self):
+ return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
+
+ def find_common_int_type(self, env, op, operand1, operand2):
+ # type1 != type2 and at least one of the types is not a C int
+ type1 = operand1.type
+ type2 = operand2.type
+ type1_can_be_int = False
+ type2_can_be_int = False
+
+ if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
+ type1_can_be_int = True
+ if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
+ type2_can_be_int = True
+
+ if type1.is_int:
+ if type2_can_be_int:
+ return type1
+ elif type2.is_int:
+ if type1_can_be_int:
+ return type2
+ elif type1_can_be_int:
+ if type2_can_be_int:
+ if Builtin.unicode_type in (type1, type2):
+ return PyrexTypes.c_py_ucs4_type
+ else:
+ return PyrexTypes.c_uchar_type
+
+ return None
+
+ def find_common_type(self, env, op, operand1, common_type=None):
+ operand2 = self.operand2
+ type1 = operand1.type
+ type2 = operand2.type
+
+ new_common_type = None
+
+ # catch general errors
+ if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
+ type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
+ error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
+ new_common_type = error_type
+
+ # try to use numeric comparisons where possible
+ elif type1.is_complex or type2.is_complex:
+ if (op not in ('==', '!=')
+ and (type1.is_complex or type1.is_numeric)
+ and (type2.is_complex or type2.is_numeric)):
+ error(self.pos, "complex types are unordered")
+ new_common_type = error_type
+ elif type1.is_pyobject:
+ new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
+ elif type2.is_pyobject:
+ new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
+ else:
+ new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ elif type1.is_numeric and type2.is_numeric:
+ new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ elif common_type is None or not common_type.is_pyobject:
+ new_common_type = self.find_common_int_type(env, op, operand1, operand2)
+
+ if new_common_type is None:
+ # fall back to generic type compatibility tests
+ if type1.is_ctuple or type2.is_ctuple:
+ new_common_type = py_object_type
+ elif type1 == type2:
+ new_common_type = type1
+ elif type1.is_pyobject or type2.is_pyobject:
+ if type2.is_numeric or type2.is_string:
+ if operand2.check_for_coercion_error(type1, env):
+ new_common_type = error_type
+ else:
+ new_common_type = py_object_type
+ elif type1.is_numeric or type1.is_string:
+ if operand1.check_for_coercion_error(type2, env):
+ new_common_type = error_type
+ else:
+ new_common_type = py_object_type
+ elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
+ new_common_type = py_object_type
+ else:
+ # one Python type and one non-Python type, not assignable
+ self.invalid_types_error(operand1, op, operand2)
+ new_common_type = error_type
+ elif type1.assignable_from(type2):
+ new_common_type = type1
+ elif type2.assignable_from(type1):
+ new_common_type = type2
+ else:
+ # C types that we couldn't handle up to here are an error
+ self.invalid_types_error(operand1, op, operand2)
+ new_common_type = error_type
+
+ if new_common_type.is_string and (isinstance(operand1, BytesNode) or
+ isinstance(operand2, BytesNode)):
+ # special case when comparing char* to bytes literal: must
+ # compare string values!
+ new_common_type = bytes_type
+
+ # recursively merge types
+ if common_type is None or new_common_type.is_error:
+ common_type = new_common_type
+ else:
+ # we could do a lot better by splitting the comparison
+ # into a non-Python part and a Python part, but this is
+ # safer for now
+ common_type = PyrexTypes.spanning_type(common_type, new_common_type)
+
+ if self.cascade:
+ common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
+
+ return common_type
+
+ def invalid_types_error(self, operand1, op, operand2):
+ error(self.pos, "Invalid types for '%s' (%s, %s)" %
+ (op, operand1.type, operand2.type))
+
+ def is_python_comparison(self):
+ return (not self.is_ptr_contains()
+ and not self.is_c_string_contains()
+ and (self.has_python_operands()
+ or (self.cascade and self.cascade.is_python_comparison())
+ or self.operator in ('in', 'not_in')))
+
+ def coerce_operands_to(self, dst_type, env):
+ operand2 = self.operand2
+ if operand2.type != dst_type:
+ self.operand2 = operand2.coerce_to(dst_type, env)
+ if self.cascade:
+ self.cascade.coerce_operands_to(dst_type, env)
+
+ def is_python_result(self):
+ return ((self.has_python_operands() and
+ self.special_bool_cmp_function is None and
+ self.operator not in ('is', 'is_not', 'in', 'not_in') and
+ not self.is_c_string_contains() and
+ not self.is_ptr_contains())
+ or (self.cascade and self.cascade.is_python_result()))
+
+ def is_c_string_contains(self):
+ return self.operator in ('in', 'not_in') and \
+ ((self.operand1.type.is_int
+ and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
+ (self.operand1.type.is_unicode_char
+ and self.operand2.type is unicode_type))
+
+ def is_ptr_contains(self):
+ if self.operator in ('in', 'not_in'):
+ container_type = self.operand2.type
+ return (container_type.is_ptr or container_type.is_array) \
+ and not container_type.is_string
+
+ def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
+ # note: currently operand1 must get coerced to a Python object if we succeed here!
+ if self.operator in ('==', '!='):
+ type1, type2 = operand1.type, self.operand2.type
+ if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
+ if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
+ return True
+ elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
+ return True
+ elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
+ return True
+ elif type1 is Builtin.str_type or type2 is Builtin.str_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyString_Equals"
+ return True
+ elif self.operator in ('in', 'not_in'):
+ if self.operand2.type is Builtin.dict_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
+ return True
+ elif self.operand2.type is Builtin.set_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
+ return True
+ elif self.operand2.type is Builtin.unicode_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
+ return True
+ else:
+ if not self.operand2.type.is_pyobject:
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
+ return True
+ return False
+
+    def generate_operation_code(self, code, result_code,
+                                operand1, op, operand2):
+ if self.type.is_pyobject:
+ error_clause = code.error_goto_if_null
+ got_ref = "__Pyx_XGOTREF(%s); " % result_code
+ if self.special_bool_cmp_function:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
+ coerce_result = "__Pyx_PyBoolOrNull_FromLong"
+ else:
+ coerce_result = "__Pyx_PyBool_FromLong"
+ else:
+ error_clause = code.error_goto_if_neg
+ got_ref = ""
+ coerce_result = ""
+
+ if self.special_bool_cmp_function:
+ if operand1.type.is_pyobject:
+ result1 = operand1.py_result()
+ else:
+ result1 = operand1.result()
+ if operand2.type.is_pyobject:
+ result2 = operand2.py_result()
+ else:
+ result2 = operand2.result()
+ if self.special_bool_cmp_utility_code:
+ code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
+ code.putln(
+ "%s = %s(%s(%s, %s, %s)); %s%s" % (
+ result_code,
+ coerce_result,
+ self.special_bool_cmp_function,
+ result1, result2, richcmp_constants[op],
+ got_ref,
+ error_clause(result_code, self.pos)))
+
+ elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
+ assert op not in ('in', 'not_in'), op
+ code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
+ result_code,
+ operand1.py_result(),
+ operand2.py_result(),
+ richcmp_constants[op],
+ got_ref,
+ error_clause(result_code, self.pos)))
+
+ elif operand1.type.is_complex:
+ code.putln("%s = %s(%s%s(%s, %s));" % (
+ result_code,
+ coerce_result,
+ op == "!=" and "!" or "",
+ operand1.type.unary_op('eq'),
+ operand1.result(),
+ operand2.result()))
+
+ else:
+ type1 = operand1.type
+ type2 = operand2.type
+ if (type1.is_extension_type or type2.is_extension_type) \
+ and not type1.same_as(type2):
+ common_type = py_object_type
+ elif type1.is_numeric:
+ common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ else:
+ common_type = type1
+ code1 = operand1.result_as(common_type)
+ code2 = operand2.result_as(common_type)
+ statement = "%s = %s(%s %s %s);" % (
+ result_code,
+ coerce_result,
+ code1,
+ self.c_operator(op),
+ code2)
+ if self.is_cpp_comparison() and self.exception_check == '+':
+ translate_cpp_exception(
+ code,
+ self.pos,
+ statement,
+ result_code if self.type.is_pyobject else None,
+ self.exception_value,
+ self.in_nogil_context)
+ else:
+ code.putln(statement)
+
+ def c_operator(self, op):
+ if op == 'is':
+ return "=="
+ elif op == 'is_not':
+ return "!="
+ else:
+ return op
+
+class PrimaryCmpNode(ExprNode, CmpNode):
+ # Non-cascaded comparison or first comparison of
+ # a cascaded sequence.
+ #
+ # operator string
+ # operand1 ExprNode
+ # operand2 ExprNode
+ # cascade CascadedCmpNode
+
+ # We don't use the subexprs mechanism, because
+ # things here are too complicated for it to handle.
+ # Instead, we override all the framework methods
+ # which use it.
+
+ child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
+
+ cascade = None
+ coerced_operand2 = None
+ is_memslice_nonecheck = False
+
+ def infer_type(self, env):
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+
+ # TODO: implement this for other types.
+ return py_object_type
+
+ def type_dependencies(self, env):
+ return ()
+
+ def calculate_constant_result(self):
+ assert not self.cascade
+ self.calculate_cascaded_constant_result(self.operand1.constant_result)
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ return self.cascaded_compile_time_value(operand1, denv)
+
+ def analyse_types(self, env):
+ self.operand1 = self.operand1.analyse_types(env)
+ self.operand2 = self.operand2.analyse_types(env)
+ if self.is_cpp_comparison():
+ self.analyse_cpp_comparison(env)
+ if self.cascade:
+ error(self.pos, "Cascading comparison not yet supported for cpp types.")
+ return self
+
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ self.is_pycmp = False
+ return self
+
+ if self.analyse_memoryviewslice_comparison(env):
+ return self
+
+ if self.cascade:
+ self.cascade = self.cascade.analyse_types(env)
+
+ if self.operator in ('in', 'not_in'):
+ if self.is_c_string_contains():
+ self.is_pycmp = False
+ common_type = None
+ if self.cascade:
+ error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
+ return self
+ if self.operand2.type is unicode_type:
+ env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
+ else:
+ if self.operand1.type is PyrexTypes.c_uchar_type:
+ self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
+ if self.operand2.type is not bytes_type:
+ self.operand2 = self.operand2.coerce_to(bytes_type, env)
+ env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
+ self.operand2 = self.operand2.as_none_safe_node(
+ "argument of type 'NoneType' is not iterable")
+ elif self.is_ptr_contains():
+ if self.cascade:
+ error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
+ self.type = PyrexTypes.c_bint_type
+ # Will be transformed by IterationTransform
+ return self
+ elif self.find_special_bool_compare_function(env, self.operand1):
+ if not self.operand1.type.is_pyobject:
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ common_type = None # if coercion needed, the method call above has already done it
+ self.is_pycmp = False # result is bint
+ else:
+ common_type = py_object_type
+ self.is_pycmp = True
+ elif self.find_special_bool_compare_function(env, self.operand1):
+ if not self.operand1.type.is_pyobject:
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ common_type = None # if coercion needed, the method call above has already done it
+ self.is_pycmp = False # result is bint
+ else:
+ common_type = self.find_common_type(env, self.operator, self.operand1)
+ self.is_pycmp = common_type.is_pyobject
+
+ if common_type is not None and not common_type.is_error:
+ if self.operand1.type != common_type:
+ self.operand1 = self.operand1.coerce_to(common_type, env)
+ self.coerce_operands_to(common_type, env)
+
+ if self.cascade:
+ self.operand2 = self.operand2.coerce_to_simple(env)
+ self.cascade.coerce_cascaded_operands_to_temp(env)
+ operand2 = self.cascade.optimise_comparison(self.operand2, env)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ if self.is_python_result():
+ self.type = PyrexTypes.py_object_type
+ else:
+ self.type = PyrexTypes.c_bint_type
+ cdr = self.cascade
+ while cdr:
+ cdr.type = self.type
+ cdr = cdr.cascade
+ if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
+ # 1) owned reference, 2) reused value, 3) potential function error return value
+ self.is_temp = 1
+ return self
+
+ def analyse_cpp_comparison(self, env):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ self.is_pycmp = False
+ entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
+ if entry is None:
+ error(self.pos, "Invalid types for '%s' (%s, %s)" %
+ (self.operator, type1, type2))
+ self.type = PyrexTypes.error_type
+ self.result_code = "<error>"
+ return
+ func_type = entry.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check == '+':
+ self.is_temp = True
+ if self.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ if len(func_type.args) == 1:
+ self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
+ else:
+ self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
+ self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
+ self.type = func_type.return_type
+
+ def analyse_memoryviewslice_comparison(self, env):
+ have_none = self.operand1.is_none or self.operand2.is_none
+ have_slice = (self.operand1.type.is_memoryviewslice or
+ self.operand2.type.is_memoryviewslice)
+ ops = ('==', '!=', 'is', 'is_not')
+ if have_slice and have_none and self.operator in ops:
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ self.is_memslice_nonecheck = True
+ return True
+
+ return False
+
+ def coerce_to_boolean(self, env):
+ if self.is_pycmp:
+ # coercing to bool => may allow for more efficient comparison code
+ if self.find_special_bool_compare_function(
+ env, self.operand1, result_is_bool=True):
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ self.is_temp = 1
+ if self.cascade:
+ operand2 = self.cascade.optimise_comparison(
+ self.operand2, env, result_is_bool=True)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ return self
+ # TODO: check if we can optimise parts of the cascade here
+ return ExprNode.coerce_to_boolean(self, env)
+
+ def has_python_operands(self):
+ return (self.operand1.type.is_pyobject
+ or self.operand2.type.is_pyobject)
+
+ def check_const(self):
+ if self.cascade:
+ self.not_const()
+ return False
+ else:
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def calculate_result_code(self):
+ operand1, operand2 = self.operand1, self.operand2
+ if operand1.type.is_complex:
+ if self.operator == "!=":
+ negation = "!"
+ else:
+ negation = ""
+ return "(%s%s(%s, %s))" % (
+ negation,
+ operand1.type.binary_op('=='),
+ operand1.result(),
+ operand2.result())
+ elif self.is_c_string_contains():
+ if operand2.type is unicode_type:
+ method = "__Pyx_UnicodeContainsUCS4"
+ else:
+ method = "__Pyx_BytesContains"
+ if self.operator == "not_in":
+ negation = "!"
+ else:
+ negation = ""
+ return "(%s%s(%s, %s))" % (
+ negation,
+ method,
+ operand2.result(),
+ operand1.result())
+ else:
+ if is_pythran_expr(self.type):
+ result1, result2 = operand1.pythran_result(), operand2.pythran_result()
+ else:
+ result1, result2 = operand1.result(), operand2.result()
+ if self.is_memslice_nonecheck:
+ if operand1.type.is_memoryviewslice:
+ result1 = "((PyObject *) %s.memview)" % result1
+ else:
+ result2 = "((PyObject *) %s.memview)" % result2
+
+ return "(%s %s %s)" % (
+ result1,
+ self.c_operator(self.operator),
+ result2)
+
+ def generate_evaluation_code(self, code):
+ self.operand1.generate_evaluation_code(code)
+ self.operand2.generate_evaluation_code(code)
+ if self.is_temp:
+ self.allocate_temp_result(code)
+ self.generate_operation_code(code, self.result(),
+ self.operand1, self.operator, self.operand2)
+ if self.cascade:
+ self.cascade.generate_evaluation_code(
+ code, self.result(), self.coerced_operand2 or self.operand2,
+ needs_evaluation=self.coerced_operand2 is not None)
+ self.operand1.generate_disposal_code(code)
+ self.operand1.free_temps(code)
+ self.operand2.generate_disposal_code(code)
+ self.operand2.free_temps(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ # If this is called, it is a non-cascaded cmp,
+        # so we only need to dispose of the two main operands.
+ self.operand1.generate_disposal_code(code)
+ self.operand2.generate_disposal_code(code)
+
+ def free_subexpr_temps(self, code):
+ # If this is called, it is a non-cascaded cmp,
+        # so we only need to free the temps of the two main operands.
+ self.operand1.free_temps(code)
+ self.operand2.free_temps(code)
+
+ def annotate(self, code):
+ self.operand1.annotate(code)
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
+
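+# A hedged sketch of the cascaded evaluation for `a < b < c` with plain C ints
+# (names hypothetical). The shared operand `b` is coerced to a simple/temp
+# value above so that it can be reused, and the second comparison only runs
+# if the first one succeeded:
+#
+#     __pyx_t_1 = (__pyx_v_a < __pyx_v_b);
+#     if (__pyx_t_1) {
+#         __pyx_t_1 = (__pyx_v_b < __pyx_v_c);
+#     }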
+
+class CascadedCmpNode(Node, CmpNode):
+ # A CascadedCmpNode is not a complete expression node. It
+ # hangs off the side of another comparison node, shares
+ # its left operand with that node, and shares its result
+ # with the PrimaryCmpNode at the head of the chain.
+ #
+ # operator string
+ # operand2 ExprNode
+ # cascade CascadedCmpNode
+
+ child_attrs = ['operand2', 'coerced_operand2', 'cascade']
+
+ cascade = None
+ coerced_operand2 = None
+ constant_result = constant_value_not_set # FIXME: where to calculate this?
+
+ def infer_type(self, env):
+ # TODO: Actually implement this (after merging with -unstable).
+ return py_object_type
+
+ def type_dependencies(self, env):
+ return ()
+
+ def has_constant_result(self):
+ return self.constant_result is not constant_value_not_set and \
+ self.constant_result is not not_a_constant
+
+ def analyse_types(self, env):
+ self.operand2 = self.operand2.analyse_types(env)
+ if self.cascade:
+ self.cascade = self.cascade.analyse_types(env)
+ return self
+
+ def has_python_operands(self):
+ return self.operand2.type.is_pyobject
+
+ def is_cpp_comparison(self):
+ # cascaded comparisons aren't currently implemented for c++ classes.
+ return False
+
+ def optimise_comparison(self, operand1, env, result_is_bool=False):
+ if self.find_special_bool_compare_function(env, operand1, result_is_bool):
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ if not operand1.type.is_pyobject:
+ operand1 = operand1.coerce_to_pyobject(env)
+ if self.cascade:
+ operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ return operand1
+
+ def coerce_operands_to_pyobjects(self, env):
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+ if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ if self.cascade:
+ self.cascade.coerce_operands_to_pyobjects(env)
+
+ def coerce_cascaded_operands_to_temp(self, env):
+ if self.cascade:
+ #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
+ self.operand2 = self.operand2.coerce_to_simple(env)
+ self.cascade.coerce_cascaded_operands_to_temp(env)
+
+ def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
+ if self.type.is_pyobject:
+ code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
+ code.put_decref(result, self.type)
+ else:
+ code.putln("if (%s) {" % result)
+ if needs_evaluation:
+ operand1.generate_evaluation_code(code)
+ self.operand2.generate_evaluation_code(code)
+ self.generate_operation_code(code, result,
+ operand1, self.operator, self.operand2)
+ if self.cascade:
+ self.cascade.generate_evaluation_code(
+ code, result, self.coerced_operand2 or self.operand2,
+ needs_evaluation=self.coerced_operand2 is not None)
+ if needs_evaluation:
+ operand1.generate_disposal_code(code)
+ operand1.free_temps(code)
+ # Cascaded cmp result is always temp
+ self.operand2.generate_disposal_code(code)
+ self.operand2.free_temps(code)
+ code.putln("}")
+
+ def annotate(self, code):
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
+
+
+binop_node_classes = {
+ "or": BoolBinopNode,
+ "and": BoolBinopNode,
+ "|": IntBinopNode,
+ "^": IntBinopNode,
+ "&": IntBinopNode,
+ "<<": IntBinopNode,
+ ">>": IntBinopNode,
+ "+": AddNode,
+ "-": SubNode,
+ "*": MulNode,
+ "@": MatMultNode,
+ "/": DivNode,
+ "//": DivNode,
+ "%": ModNode,
+ "**": PowNode,
+}
+
+
+def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
+ # Construct binop node of appropriate class for
+ # given operator.
+ return binop_node_classes[operator](
+ pos,
+ operator=operator,
+ operand1=operand1,
+ operand2=operand2,
+ inplace=inplace,
+ **kwargs)
+
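+# Usage sketch (operand and position names are hypothetical): the operator
+# string selects the node class, so e.g.
+#
+#     node = binop_node(pos, '+', lhs, rhs)    # -> AddNode
+#     node = binop_node(pos, 'and', lhs, rhs)  # -> BoolBinopNode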
+
+#-------------------------------------------------------------------
+#
+# Coercion nodes
+#
+# Coercion nodes are special in that they are created during
+# the analyse_types phase of parse tree processing.
+# Their __init__ methods consequently incorporate some aspects
+# of that phase.
+#
+#-------------------------------------------------------------------
+
+class CoercionNode(ExprNode):
+ # Abstract base class for coercion nodes.
+ #
+ # arg ExprNode node being coerced
+
+ subexprs = ['arg']
+ constant_result = not_a_constant
+
+ def __init__(self, arg):
+ super(CoercionNode, self).__init__(arg.pos)
+ self.arg = arg
+ if debug_coercion:
+ print("%s Coercing %s" % (self, self.arg))
+
+ def calculate_constant_result(self):
+ # constant folding can break type coercion, so this is disabled
+ pass
+
+ def annotate(self, code):
+ self.arg.annotate(code)
+ if self.arg.type != self.type:
+ file, line, col = self.pos
+ code.annotate((file, line, col-1), AnnotationItem(
+ style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
+
+
+class CoerceToMemViewSliceNode(CoercionNode):
+ """
+ Coerce an object to a memoryview slice. This holds a new reference in
+ a managed temp.
+ """
+
+ def __init__(self, arg, dst_type, env):
+ assert dst_type.is_memoryviewslice
+ assert not arg.type.is_memoryviewslice
+ CoercionNode.__init__(self, arg)
+ self.type = dst_type
+ self.is_temp = 1
+ self.use_managed_ref = True
+ self.arg = arg
+ self.type.create_from_py_utility_code(env)
+
+ def generate_result_code(self, code):
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(),
+ self.result(),
+ self.pos,
+ code
+ ))
+
+
+class CastNode(CoercionNode):
+ # Wrap a node in a C type cast.
+
+ def __init__(self, arg, new_type):
+ CoercionNode.__init__(self, arg)
+ self.type = new_type
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def calculate_result_code(self):
+ return self.arg.result_as(self.type)
+
+ def generate_result_code(self, code):
+ self.arg.generate_result_code(code)
+
+
+class PyTypeTestNode(CoercionNode):
+ # This node is used to check that a generic Python
+ # object is an instance of a particular extension type.
+ # This node borrows the result of its argument node.
+
+ exact_builtin_type = True
+
+ def __init__(self, arg, dst_type, env, notnone=False):
+ # The arg is know to be a Python object, and
+ # the dst_type is known to be an extension type.
+ assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
+ CoercionNode.__init__(self, arg)
+ self.type = dst_type
+ self.result_ctype = arg.ctype()
+ self.notnone = notnone
+
+ nogil_check = Node.gil_error
+ gil_message = "Python type test"
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ if self.notnone:
+ return False
+ return self.arg.may_be_none()
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def result_in_temp(self):
+ return self.arg.result_in_temp()
+
+ def is_ephemeral(self):
+ return self.arg.is_ephemeral()
+
+ def nonlocally_immutable(self):
+ return self.arg.nonlocally_immutable()
+
+ def reanalyse(self):
+ if self.type != self.arg.type or not self.arg.is_temp:
+ return self
+ if not self.type.typeobj_is_available():
+ return self
+ if self.arg.may_be_none() and self.notnone:
+ return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
+ return self.arg
+
+ def calculate_constant_result(self):
+ # FIXME
+ pass
+
+ def calculate_result_code(self):
+ return self.arg.result()
+
+ def generate_result_code(self, code):
+ if self.type.typeobj_is_available():
+ if self.type.is_builtin_type:
+ type_test = self.type.type_test_code(
+ self.arg.py_result(),
+ self.notnone, exact=self.exact_builtin_type)
+ else:
+ type_test = self.type.type_test_code(
+ self.arg.py_result(), self.notnone)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
+ code.putln("if (!(%s)) %s" % (
+ type_test, code.error_goto(self.pos)))
+ else:
+ error(self.pos, "Cannot test type of extern C class "
+ "without type object name specification")
+
+ def generate_post_assignment_code(self, code):
+ self.arg.generate_post_assignment_code(code)
+
+ def allocate_temp_result(self, code):
+ pass
+
+ def release_temp_result(self, code):
+ pass
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
+ def free_subexpr_temps(self, code):
+ self.arg.free_subexpr_temps(code)
+
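+# A rough sketch of the guard emitted by generate_result_code() above for an
+# extension type (type object name hypothetical):
+#
+#     if (!(likely(__Pyx_TypeTest(__pyx_v_obj, __pyx_ptype_MyType))))
+#         goto __pyx_L1_error;
+#
+# The argument's own result is then reused unchanged, since this node borrows
+# rather than owns it.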
+
+class NoneCheckNode(CoercionNode):
+ # This node is used to check that a Python object is not None and
+ # raises an appropriate exception (as specified by the creating
+ # transform).
+
+ is_nonecheck = True
+
+ def __init__(self, arg, exception_type_cname, exception_message,
+ exception_format_args=()):
+ CoercionNode.__init__(self, arg)
+ self.type = arg.type
+ self.result_ctype = arg.ctype()
+ self.exception_type_cname = exception_type_cname
+ self.exception_message = exception_message
+ self.exception_format_args = tuple(exception_format_args or ())
+
+ nogil_check = None # this node only guards an operation that would fail already
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def result_in_temp(self):
+ return self.arg.result_in_temp()
+
+ def nonlocally_immutable(self):
+ return self.arg.nonlocally_immutable()
+
+ def calculate_result_code(self):
+ return self.arg.result()
+
+ def condition(self):
+ if self.type.is_pyobject:
+ return self.arg.py_result()
+ elif self.type.is_memoryviewslice:
+ return "((PyObject *) %s.memview)" % self.arg.result()
+ else:
+ raise Exception("unsupported type")
+
+ @classmethod
+ def generate(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ node = cls(arg, exception_type_cname, exception_message, exception_format_args)
+ node.in_nogil_context = in_nogil_context
+ node.put_nonecheck(code)
+
+ @classmethod
+ def generate_if_needed(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ if arg.may_be_none():
+ cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
+
+ def put_nonecheck(self, code):
+ code.putln(
+ "if (unlikely(%s == Py_None)) {" % self.condition())
+
+ if self.in_nogil_context:
+ code.put_ensure_gil()
+
+ escape = StringEncoding.escape_byte_string
+ if self.exception_format_args:
+ code.putln('PyErr_Format(%s, "%s", %s);' % (
+ self.exception_type_cname,
+                escape(self.exception_message.encode('UTF-8')),
+                ', '.join(['"%s"' % escape(str(arg).encode('UTF-8'))
+                           for arg in self.exception_format_args])))
+ else:
+ code.putln('PyErr_SetString(%s, "%s");' % (
+ self.exception_type_cname,
+ escape(self.exception_message.encode('UTF-8'))))
+
+ if self.in_nogil_context:
+ code.put_release_ensured_gil()
+
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ def generate_result_code(self, code):
+ self.put_nonecheck(code)
+
+ def generate_post_assignment_code(self, code):
+ self.arg.generate_post_assignment_code(code)
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
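+# put_nonecheck() above produces roughly the following (exception type and
+# message vary per instance):
+#
+#     if (unlikely(__pyx_v_obj == Py_None)) {
+#         PyErr_SetString(PyExc_TypeError, "...message...");
+#         goto __pyx_L1_error;
+#     }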
+
+class CoerceToPyTypeNode(CoercionNode):
+ # This node is used to convert a C data type
+ # to a Python object.
+
+ type = py_object_type
+ target_type = py_object_type
+ is_temp = 1
+
+ def __init__(self, arg, env, type=py_object_type):
+ if not arg.type.create_to_py_utility_code(env):
+ error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
+ elif arg.type.is_complex:
+ # special case: complex coercion is so complex that it
+ # uses a macro ("__pyx_PyComplex_FromComplex()"), for
+ # which the argument must be simple
+ arg = arg.coerce_to_simple(env)
+ CoercionNode.__init__(self, arg)
+ if type is py_object_type:
+ # be specific about some known types
+ if arg.type.is_string or arg.type.is_cpp_string:
+ self.type = default_str_type(env)
+ elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
+ self.type = unicode_type
+ elif arg.type.is_complex:
+ self.type = Builtin.complex_type
+ self.target_type = self.type
+ elif arg.type.is_string or arg.type.is_cpp_string:
+ if (type not in (bytes_type, bytearray_type)
+ and not env.directives['c_string_encoding']):
+ error(arg.pos,
+ "default encoding required for conversion from '%s' to '%s'" %
+ (arg.type, type))
+ self.type = self.target_type = type
+ else:
+ # FIXME: check that the target type and the resulting type are compatible
+ self.target_type = type
+
+ gil_message = "Converting to Python object"
+
+ def may_be_none(self):
+ # FIXME: is this always safe?
+ return False
+
+ def coerce_to_boolean(self, env):
+ arg_type = self.arg.type
+ if (arg_type == PyrexTypes.c_bint_type or
+ (arg_type.is_pyobject and arg_type.name == 'bool')):
+ return self.arg.coerce_to_temp(env)
+ else:
+ return CoerceToBooleanNode(self, env)
+
+ def coerce_to_integer(self, env):
+ # If not already some C integer type, coerce to longint.
+ if self.arg.type.is_int:
+ return self.arg
+ else:
+ return self.arg.coerce_to(PyrexTypes.c_long_type, env)
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def generate_result_code(self, code):
+ code.putln('%s; %s' % (
+ self.arg.type.to_py_call_code(
+ self.arg.result(),
+ self.result(),
+ self.target_type),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
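+# For a C long argument, generate_result_code() above emits roughly the
+# following (temp name hypothetical):
+#
+#     __pyx_t_1 = __Pyx_PyInt_From_long(__pyx_v_i);
+#     if (unlikely(!__pyx_t_1)) goto __pyx_L1_error;
+#     __Pyx_GOTREF(__pyx_t_1);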
+
+class CoerceIntToBytesNode(CoerceToPyTypeNode):
+ # This node is used to convert a C int type to a Python bytes
+ # object.
+
+ is_temp = 1
+
+ def __init__(self, arg, env):
+ arg = arg.coerce_to_simple(env)
+ CoercionNode.__init__(self, arg)
+ self.type = Builtin.bytes_type
+
+ def generate_result_code(self, code):
+ arg = self.arg
+ arg_result = arg.result()
+ if arg.type not in (PyrexTypes.c_char_type,
+ PyrexTypes.c_uchar_type,
+ PyrexTypes.c_schar_type):
+ if arg.type.signed:
+ code.putln("if ((%s < 0) || (%s > 255)) {" % (
+ arg_result, arg_result))
+ else:
+ code.putln("if (%s > 255) {" % arg_result)
+ code.putln('PyErr_SetString(PyExc_OverflowError, '
+ '"value too large to pack into a byte"); %s' % (
+ code.error_goto(self.pos)))
+ code.putln('}')
+ temp = None
+ if arg.type is not PyrexTypes.c_char_type:
+ temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
+ code.putln("%s = (char)%s;" % (temp, arg_result))
+ arg_result = temp
+ code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
+ self.result(),
+ arg_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+ if temp is not None:
+ code.funcstate.release_temp(temp)
+ code.put_gotref(self.py_result())
+
+
+class CoerceFromPyTypeNode(CoercionNode):
+ # This node is used to convert a Python object
+ # to a C data type.
+
+ def __init__(self, result_type, arg, env):
+ CoercionNode.__init__(self, arg)
+ self.type = result_type
+ self.is_temp = 1
+ if not result_type.create_from_py_utility_code(env):
+ error(arg.pos,
+ "Cannot convert Python object to '%s'" % result_type)
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
+ warning(arg.pos,
+ "Obtaining '%s' from externally modifiable global Python value" % result_type,
+ level=1)
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def is_ephemeral(self):
+ return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
+
+ def generate_result_code(self, code):
+ from_py_function = None
+ # for certain source types, we can do better than the generic coercion
+ if self.type.is_string and self.arg.type is bytes_type:
+ if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
+ from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
+ NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
+
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
+ if self.type.is_pyobject:
+ code.put_gotref(self.py_result())
+
+ def nogil_check(self, env):
+ error(self.pos, "Coercion from Python not allowed without the GIL")
+
+
+class CoerceToBooleanNode(CoercionNode):
+ # This node is used when a result needs to be used
+ # in a boolean context.
+
+ type = PyrexTypes.c_bint_type
+
+ _special_builtins = {
+ Builtin.list_type: 'PyList_GET_SIZE',
+ Builtin.tuple_type: 'PyTuple_GET_SIZE',
+ Builtin.set_type: 'PySet_GET_SIZE',
+ Builtin.frozenset_type: 'PySet_GET_SIZE',
+ Builtin.bytes_type: 'PyBytes_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
+ }
+
+ def __init__(self, arg, env):
+ CoercionNode.__init__(self, arg)
+ if arg.type.is_pyobject:
+ self.is_temp = 1
+
+ def nogil_check(self, env):
+ if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
+ self.gil_error()
+
+ gil_message = "Truth-testing Python object"
+
+ def check_const(self):
+ if self.is_temp:
+ self.not_const()
+ return False
+ return self.arg.check_const()
+
+ def calculate_result_code(self):
+ return "(%s != 0)" % self.arg.result()
+
+ def generate_result_code(self, code):
+ if not self.is_temp:
+ return
+ test_func = self._special_builtins.get(self.arg.type)
+ if test_func is not None:
+ checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
+ checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
+ code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
+ else:
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ self.result(),
+ self.arg.py_result(),
+ code.error_goto_if_neg(self.result(), self.pos)))
+
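+# Example of the fast path above: for a `list` argument that may be None, the
+# truth test becomes roughly
+#
+#     __pyx_t_1 = ((__pyx_v_x != Py_None) && (PyList_GET_SIZE(__pyx_v_x) != 0));
+#
+# while a generic object falls back to __Pyx_PyObject_IsTrue() with a check
+# for its -1 error return.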
+
+class CoerceToComplexNode(CoercionNode):
+
+ def __init__(self, arg, dst_type, env):
+ if arg.type.is_complex:
+ arg = arg.coerce_to_simple(env)
+ self.type = dst_type
+ CoercionNode.__init__(self, arg)
+ dst_type.create_declaration_utility_code(env)
+
+ def calculate_result_code(self):
+ if self.arg.type.is_complex:
+ real_part = "__Pyx_CREAL(%s)" % self.arg.result()
+ imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
+ else:
+ real_part = self.arg.result()
+ imag_part = "0"
+ return "%s(%s, %s)" % (
+ self.type.from_parts,
+ real_part,
+ imag_part)
+
+ def generate_result_code(self, code):
+ pass
+
+class CoerceToTempNode(CoercionNode):
+ # This node is used to force the result of another node
+ # to be stored in a temporary. It is only used if the
+ # argument node's result is not already in a temporary.
+
+ def __init__(self, arg, env):
+ CoercionNode.__init__(self, arg)
+ self.type = self.arg.type.as_argument_type()
+ self.constant_result = self.arg.constant_result
+ self.is_temp = 1
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+
+ gil_message = "Creating temporary Python reference"
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def coerce_to_boolean(self, env):
+ self.arg = self.arg.coerce_to_boolean(env)
+ if self.arg.is_simple():
+ return self.arg
+ self.type = self.arg.type
+ self.result_ctype = self.type
+ return self
+
+ def generate_result_code(self, code):
+ #self.arg.generate_evaluation_code(code) # Already done
+ # by generic generate_subexpr_evaluation_code!
+ code.putln("%s = %s;" % (
+ self.result(), self.arg.result_as(self.ctype())))
+ if self.use_managed_ref:
+ if self.type.is_pyobject:
+ code.put_incref(self.result(), self.ctype())
+ elif self.type.is_memoryviewslice:
+ code.put_incref_memoryviewslice(self.result(),
+ not self.in_nogil_context)
+
+class ProxyNode(CoercionNode):
+ """
+ A node that should not be replaced by transforms or other means,
+    and hence can be useful for wrapping the argument of a CloneNode:
+
+ MyNode -> ProxyNode -> ArgNode
+ CloneNode -^
+ """
+
+ nogil_check = None
+
+ def __init__(self, arg):
+ super(ProxyNode, self).__init__(arg)
+ self.constant_result = arg.constant_result
+ self._proxy_type()
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_expressions(env)
+ self._proxy_type()
+ return self
+
+ def infer_type(self, env):
+ return self.arg.infer_type(env)
+
+ def _proxy_type(self):
+ if hasattr(self.arg, 'type'):
+ self.type = self.arg.type
+ self.result_ctype = self.arg.result_ctype
+ if hasattr(self.arg, 'entry'):
+ self.entry = self.arg.entry
+
+ def generate_result_code(self, code):
+ self.arg.generate_result_code(code)
+
+ def result(self):
+ return self.arg.result()
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def generate_evaluation_code(self, code):
+ self.arg.generate_evaluation_code(code)
+
+ def generate_disposal_code(self, code):
+ self.arg.generate_disposal_code(code)
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
+class CloneNode(CoercionNode):
+ # This node is employed when the result of another node needs
+ # to be used multiple times. The argument node's result must
+ # be in a temporary. This node "borrows" the result from the
+ # argument node, and does not generate any evaluation or
+ # disposal code for it. The original owner of the argument
+ # node is responsible for doing those things.
+
+ subexprs = [] # Arg is not considered a subexpr
+ nogil_check = None
+
+ def __init__(self, arg):
+ CoercionNode.__init__(self, arg)
+ self.constant_result = arg.constant_result
+ if hasattr(arg, 'type'):
+ self.type = arg.type
+ self.result_ctype = arg.result_ctype
+ if hasattr(arg, 'entry'):
+ self.entry = arg.entry
+
+ def result(self):
+ return self.arg.result()
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def type_dependencies(self, env):
+ return self.arg.type_dependencies(env)
+
+ def infer_type(self, env):
+ return self.arg.infer_type(env)
+
+ def analyse_types(self, env):
+ self.type = self.arg.type
+ self.result_ctype = self.arg.result_ctype
+ self.is_temp = 1
+ if hasattr(self.arg, 'entry'):
+ self.entry = self.arg.entry
+ return self
+
+ def coerce_to(self, dest_type, env):
+ if self.arg.is_literal:
+ return self.arg.coerce_to(dest_type, env)
+ return super(CloneNode, self).coerce_to(dest_type, env)
+
+ def is_simple(self):
+ return True # result is always in a temp (or a name)
+
+ def generate_evaluation_code(self, code):
+ pass
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_disposal_code(self, code):
+ pass
+
+ def free_temps(self, code):
+ pass
+
+
+class CMethodSelfCloneNode(CloneNode):
+ # Special CloneNode for the self argument of builtin C methods
+ # that accepts subtypes of the builtin type. This is safe only
+ # for 'final' subtypes, as subtypes of the declared type may
+ # override the C method.
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
+ return self
+ return CloneNode.coerce_to(self, dst_type, env)
+
+
+class ModuleRefNode(ExprNode):
+    # Simply returns the module object.
+
+ type = py_object_type
+ is_temp = False
+ subexprs = []
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def calculate_result_code(self):
+ return Naming.module_cname
+
+ def generate_result_code(self, code):
+ pass
+
+class DocstringRefNode(ExprNode):
+ # Extracts the docstring of the body element
+
+ subexprs = ['body']
+ type = py_object_type
+ is_temp = True
+
+ def __init__(self, pos, body):
+ ExprNode.__init__(self, pos)
+ assert body.type.is_pyobject
+ self.body = body
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
+ self.result(), self.body.result(),
+ code.intern_identifier(StringEncoding.EncodedString("__doc__")),
+ code.error_goto_if_null(self.result(), self.pos)))
+ code.put_gotref(self.result())
+
+
+
+#------------------------------------------------------------------------------------
+#
+# Runtime support code
+#
+#------------------------------------------------------------------------------------
+
+pyerr_occurred_withgil_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
+""",
+impl = """
+static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
+ int err;
+ #ifdef WITH_THREAD
+ PyGILState_STATE _save = PyGILState_Ensure();
+ #endif
+ err = !!PyErr_Occurred();
+ #ifdef WITH_THREAD
+ PyGILState_Release(_save);
+ #endif
+ return err;
+}
+"""
+)
+
+#------------------------------------------------------------------------------------
+
+raise_unbound_local_error_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
+""",
+impl = """
+static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
+ PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
+}
+""")
+
+raise_closure_name_error_utility_code = UtilityCode(
+proto = """
+static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
+""",
+impl = """
+static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
+ PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
+}
+""")
+
+# Don't inline the function; it should really never be called in production.
+raise_unbound_memoryview_utility_code_nogil = UtilityCode(
+proto = """
+static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
+""",
+impl = """
+static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
+ #ifdef WITH_THREAD
+ PyGILState_STATE gilstate = PyGILState_Ensure();
+ #endif
+ __Pyx_RaiseUnboundLocalError(varname);
+ #ifdef WITH_THREAD
+ PyGILState_Release(gilstate);
+ #endif
+}
+""",
+requires = [raise_unbound_local_error_utility_code])
+
+#------------------------------------------------------------------------------------
+
+raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
+raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
+tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
diff --git a/contrib/tools/cython/Cython/Compiler/FlowControl.pxd b/contrib/tools/cython/Cython/Compiler/FlowControl.pxd
new file mode 100644
index 0000000000..c87370b819
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/FlowControl.pxd
@@ -0,0 +1,111 @@
+from __future__ import absolute_import
+
+cimport cython
+
+from .Visitor cimport CythonTransform, TreeVisitor
+
+cdef class ControlBlock:
+ cdef public set children
+ cdef public set parents
+ cdef public set positions
+ cdef public list stats
+ cdef public dict gen
+ cdef public set bounded
+
+ # Big integer bitsets
+ cdef public object i_input
+ cdef public object i_output
+ cdef public object i_gen
+ cdef public object i_kill
+ cdef public object i_state
+
+ cpdef bint empty(self)
+ cpdef detach(self)
+ cpdef add_child(self, block)
+
+cdef class ExitBlock(ControlBlock):
+ cpdef bint empty(self)
+
+cdef class NameAssignment:
+ cdef public bint is_arg
+ cdef public bint is_deletion
+ cdef public object lhs
+ cdef public object rhs
+ cdef public object entry
+ cdef public object pos
+ cdef public set refs
+ cdef public object bit
+ cdef public object inferred_type
+
+cdef class AssignmentList:
+ cdef public object bit
+ cdef public object mask
+ cdef public list stats
+
+cdef class AssignmentCollector(TreeVisitor):
+ cdef list assignments
+
+@cython.final
+cdef class ControlFlow:
+ cdef public set blocks
+ cdef public set entries
+ cdef public list loops
+ cdef public list exceptions
+
+ cdef public ControlBlock entry_point
+ cdef public ExitBlock exit_point
+ cdef public ControlBlock block
+
+ cdef public dict assmts
+
+ cpdef newblock(self, ControlBlock parent=*)
+ cpdef nextblock(self, ControlBlock parent=*)
+ cpdef bint is_tracked(self, entry)
+ cpdef bint is_statically_assigned(self, entry)
+ cpdef mark_position(self, node)
+ cpdef mark_assignment(self, lhs, rhs, entry)
+ cpdef mark_argument(self, lhs, rhs, entry)
+ cpdef mark_deletion(self, node, entry)
+ cpdef mark_reference(self, node, entry)
+
+ @cython.locals(block=ControlBlock, parent=ControlBlock, unreachable=set)
+ cpdef normalize(self)
+
+ @cython.locals(bit=object, assmts=AssignmentList,
+ block=ControlBlock)
+ cpdef initialize(self)
+
+ @cython.locals(assmts=AssignmentList, assmt=NameAssignment)
+ cpdef set map_one(self, istate, entry)
+
+ @cython.locals(block=ControlBlock, parent=ControlBlock)
+ cdef reaching_definitions(self)
+
+cdef class Uninitialized:
+ pass
+
+cdef class Unknown:
+ pass
+
+
+cdef class MessageCollection:
+ cdef set messages
+
+
+@cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock,
+ assmt=NameAssignment)
+cdef check_definitions(ControlFlow flow, dict compiler_directives)
+
+@cython.final
+cdef class ControlFlowAnalysis(CythonTransform):
+ cdef object gv_ctx
+ cdef object constant_folder
+ cdef set reductions
+ cdef list env_stack
+ cdef list stack
+ cdef object env
+ cdef ControlFlow flow
+ cdef bint in_inplace_assignment
+
+ cpdef mark_assignment(self, lhs, rhs=*)
+ cpdef mark_position(self, node)
diff --git a/contrib/tools/cython/Cython/Compiler/FlowControl.py b/contrib/tools/cython/Cython/Compiler/FlowControl.py
new file mode 100644
index 0000000000..df04471f90
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/FlowControl.py
@@ -0,0 +1,1325 @@
+from __future__ import absolute_import
+
+import cython
+cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object,
+ Builtin=object, InternalError=object, error=object, warning=object,
+ py_object_type=object, unspecified_type=object,
+ object_expr=object, fake_rhs_expr=object, TypedExprNode=object)
+
+from . import Builtin
+from . import ExprNodes
+from . import Nodes
+from . import Options
+from .PyrexTypes import py_object_type, unspecified_type
+from . import PyrexTypes
+
+from .Visitor import TreeVisitor, CythonTransform
+from .Errors import error, warning, InternalError
+from .Optimize import ConstantFolding
+
+
+class TypedExprNode(ExprNodes.ExprNode):
+ # Used for declaring assignments of a specified type without a known entry.
+ def __init__(self, type, may_be_none=None, pos=None):
+ super(TypedExprNode, self).__init__(pos)
+ self.type = type
+ self._may_be_none = may_be_none
+
+ def may_be_none(self):
+ return self._may_be_none is not False
+
+object_expr = TypedExprNode(py_object_type, may_be_none=True)
+# Fake rhs to silence "unused variable" warning
+fake_rhs_expr = TypedExprNode(unspecified_type)
+
+
+class ControlBlock(object):
+ """Control flow graph node. Sequence of assignments and name references.
+
+ children set of children nodes
+ parents set of parent nodes
+ positions set of position markers
+
+ stats list of block statements
+ gen dict of assignments generated by this block
+ bounded set of entries that are definitely bound in this block
+
+ Example:
+
+ a = 1
+ b = a + c # 'c' is already bound here, or an exception is raised
+
+ stats = [Assignment(a), NameReference(a), NameReference(c),
+ Assignment(b)]
+ gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
+ bounded = set([Entry(a), Entry(c)])
+
+ """
+
+ def __init__(self):
+ self.children = set()
+ self.parents = set()
+ self.positions = set()
+
+ self.stats = []
+ self.gen = {}
+ self.bounded = set()
+
+ self.i_input = 0
+ self.i_output = 0
+ self.i_gen = 0
+ self.i_kill = 0
+ self.i_state = 0
+
+ def empty(self):
+ return (not self.stats and not self.positions)
+
+ def detach(self):
+ """Detach block from parents and children."""
+ for child in self.children:
+ child.parents.remove(self)
+ for parent in self.parents:
+ parent.children.remove(self)
+ self.parents.clear()
+ self.children.clear()
+
+ def add_child(self, block):
+ self.children.add(block)
+ block.parents.add(self)
+
+
+class ExitBlock(ControlBlock):
+ """Non-empty exit point block."""
+
+ def empty(self):
+ return False
+
+
+class AssignmentList(object):
+ def __init__(self):
+ self.stats = []
+
+
+class ControlFlow(object):
+ """Control-flow graph.
+
+ entry_point ControlBlock entry point for this graph
+ exit_point ControlBlock normal exit point
+ block ControlBlock current block
+ blocks set children nodes
+ entries set tracked entries
+ loops list stack for loop descriptors
+ exceptions list stack for exception descriptors
+ """
+
+ def __init__(self):
+ self.blocks = set()
+ self.entries = set()
+ self.loops = []
+ self.exceptions = []
+
+ self.entry_point = ControlBlock()
+ self.exit_point = ExitBlock()
+ self.blocks.add(self.exit_point)
+ self.block = self.entry_point
+
+ def newblock(self, parent=None):
+ """Create floating block linked to `parent` if given.
+
+ NOTE: Block is NOT added to self.blocks
+ """
+ block = ControlBlock()
+ self.blocks.add(block)
+ if parent:
+ parent.add_child(block)
+ return block
+
+ def nextblock(self, parent=None):
+ """Create block children block linked to current or `parent` if given.
+
+ NOTE: Block is added to self.blocks
+ """
+ block = ControlBlock()
+ self.blocks.add(block)
+ if parent:
+ parent.add_child(block)
+ elif self.block:
+ self.block.add_child(block)
+ self.block = block
+ return self.block
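+
+ # Usage sketch (illustration only; the real callers are the visit_*
+ # methods of ControlFlowAnalysis below):
+ #
+ #   flow = ControlFlow()
+ #   body = flow.nextblock()       # entry_point -> body; body is current
+ #   side = flow.newblock(body)    # body -> side; current block unchanged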
+
+ def is_tracked(self, entry):
+ if entry.is_anonymous:
+ return False
+ return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
+ entry.from_closure or entry.in_closure or
+ entry.error_on_uninitialized)
+
+ def is_statically_assigned(self, entry):
+ if (entry.is_local and entry.is_variable and
+ (entry.type.is_struct_or_union or
+ entry.type.is_complex or
+ entry.type.is_array or
+ entry.type.is_cpp_class)):
+ # stack allocated structured variable => never uninitialised
+ return True
+ return False
+
+ def mark_position(self, node):
+ """Mark position, will be used to draw graph nodes."""
+ if self.block:
+ self.block.positions.add(node.pos[:2])
+
+ def mark_assignment(self, lhs, rhs, entry):
+ if self.block and self.is_tracked(entry):
+ assignment = NameAssignment(lhs, rhs, entry)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = assignment
+ self.entries.add(entry)
+
+ def mark_argument(self, lhs, rhs, entry):
+ if self.block and self.is_tracked(entry):
+ assignment = Argument(lhs, rhs, entry)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = assignment
+ self.entries.add(entry)
+
+ def mark_deletion(self, node, entry):
+ if self.block and self.is_tracked(entry):
+ assignment = NameDeletion(node, entry)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = Uninitialized
+ self.entries.add(entry)
+
+ def mark_reference(self, node, entry):
+ if self.block and self.is_tracked(entry):
+ self.block.stats.append(NameReference(node, entry))
+ ## XXX: We don't track expression evaluation order so we can't use
+ ## XXX: successful reference as initialization sign.
+ ## # Local variable is definitely bound after this reference
+ ## if not node.allow_null:
+ ## self.block.bounded.add(entry)
+ self.entries.add(entry)
+
+ def normalize(self):
+ """Delete unreachable and orphan blocks."""
+ queue = set([self.entry_point])
+ visited = set()
+ while queue:
+ root = queue.pop()
+ visited.add(root)
+ for child in root.children:
+ if child not in visited:
+ queue.add(child)
+ unreachable = self.blocks - visited
+ for block in unreachable:
+ block.detach()
+ visited.remove(self.entry_point)
+ for block in visited:
+ if block.empty():
+ for parent in block.parents: # Re-parent
+ for child in block.children:
+ parent.add_child(child)
+ block.detach()
+ unreachable.add(block)
+ self.blocks -= unreachable
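+
+ # Re-parenting sketch: an empty block B in A -> B -> C is dropped and
+ # the edge becomes A -> C (illustration of the loop above).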
+
+ def initialize(self):
+ """Set initial state, map assignments to bits."""
+ self.assmts = {}
+
+ bit = 1
+ for entry in self.entries:
+ assmts = AssignmentList()
+ assmts.mask = assmts.bit = bit
+ self.assmts[entry] = assmts
+ bit <<= 1
+
+ for block in self.blocks:
+ for stat in block.stats:
+ if isinstance(stat, NameAssignment):
+ stat.bit = bit
+ assmts = self.assmts[stat.entry]
+ assmts.stats.append(stat)
+ assmts.mask |= bit
+ bit <<= 1
+
+ for block in self.blocks:
+ for entry, stat in block.gen.items():
+ assmts = self.assmts[entry]
+ if stat is Uninitialized:
+ block.i_gen |= assmts.bit
+ else:
+ block.i_gen |= stat.bit
+ block.i_kill |= assmts.mask
+ block.i_output = block.i_gen
+ for entry in block.bounded:
+ block.i_kill |= self.assmts[entry].bit
+
+ for assmts in self.assmts.values():
+ self.entry_point.i_gen |= assmts.bit
+ self.entry_point.i_output = self.entry_point.i_gen
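+
+ # Bit allocation sketch (one possible layout; set iteration order may
+ # vary): with entries {a, b} and assignments [a=1, b=2, a=3]:
+ #
+ #   entry bits: a -> 0b00001, b -> 0b00010
+ #   assignment bits: a=1 -> 0b00100, b=2 -> 0b01000, a=3 -> 0b10000
+ #   masks: a.mask == 0b10101, b.mask == 0b01010
+ #
+ # An entry's own bit flowing into a block means "possibly uninitialised".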
+
+ def map_one(self, istate, entry):
+ ret = set()
+ assmts = self.assmts[entry]
+ if istate & assmts.bit:
+ if self.is_statically_assigned(entry):
+ ret.add(StaticAssignment(entry))
+ elif entry.from_closure:
+ ret.add(Unknown)
+ else:
+ ret.add(Uninitialized)
+ for assmt in assmts.stats:
+ if istate & assmt.bit:
+ ret.add(assmt)
+ return ret
+
+ def reaching_definitions(self):
+ """Per-block reaching definitions analysis."""
+ dirty = True
+ while dirty:
+ dirty = False
+ for block in self.blocks:
+ i_input = 0
+ for parent in block.parents:
+ i_input |= parent.i_output
+ i_output = (i_input & ~block.i_kill) | block.i_gen
+ if i_output != block.i_output:
+ dirty = True
+ block.i_input = i_input
+ block.i_output = i_output
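+
+ # This is the standard iterative fixpoint for reaching definitions:
+ #
+ #   i_input[b]  = OR of i_output[p] over all parents p of b
+ #   i_output[b] = (i_input[b] & ~i_kill[b]) | i_gen[b]
+ #
+ # repeated until no block's i_output changes.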
+
+
+class LoopDescr(object):
+ def __init__(self, next_block, loop_block):
+ self.next_block = next_block
+ self.loop_block = loop_block
+ self.exceptions = []
+
+
+class ExceptionDescr(object):
+ """Exception handling helper.
+
+ entry_point ControlBlock Exception handling entry point
+ finally_enter ControlBlock Normal finally clause entry point
+ finally_exit ControlBlock Normal finally clause exit point
+ """
+
+ def __init__(self, entry_point, finally_enter=None, finally_exit=None):
+ self.entry_point = entry_point
+ self.finally_enter = finally_enter
+ self.finally_exit = finally_exit
+
+
+class NameAssignment(object):
+ def __init__(self, lhs, rhs, entry):
+ if lhs.cf_state is None:
+ lhs.cf_state = set()
+ self.lhs = lhs
+ self.rhs = rhs
+ self.entry = entry
+ self.pos = lhs.pos
+ self.refs = set()
+ self.is_arg = False
+ self.is_deletion = False
+ self.inferred_type = None
+
+ def __repr__(self):
+ return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
+
+ def infer_type(self):
+ self.inferred_type = self.rhs.infer_type(self.entry.scope)
+ return self.inferred_type
+
+ def type_dependencies(self):
+ return self.rhs.type_dependencies(self.entry.scope)
+
+ @property
+ def type(self):
+ if not self.entry.type.is_unspecified:
+ return self.entry.type
+ return self.inferred_type
+
+
+class StaticAssignment(NameAssignment):
+ """Initialised at declaration time, e.g. stack allocation."""
+ def __init__(self, entry):
+ if not entry.type.is_pyobject:
+ may_be_none = False
+ else:
+ may_be_none = None # unknown
+ lhs = TypedExprNode(
+ entry.type, may_be_none=may_be_none, pos=entry.pos)
+ super(StaticAssignment, self).__init__(lhs, lhs, entry)
+
+ def infer_type(self):
+ return self.entry.type
+
+ def type_dependencies(self):
+ return ()
+
+
+class Argument(NameAssignment):
+ def __init__(self, lhs, rhs, entry):
+ NameAssignment.__init__(self, lhs, rhs, entry)
+ self.is_arg = True
+
+
+class NameDeletion(NameAssignment):
+ def __init__(self, lhs, entry):
+ NameAssignment.__init__(self, lhs, lhs, entry)
+ self.is_deletion = True
+
+ def infer_type(self):
+ inferred_type = self.rhs.infer_type(self.entry.scope)
+ if (not inferred_type.is_pyobject and
+ inferred_type.can_coerce_to_pyobject(self.entry.scope)):
+ return py_object_type
+ self.inferred_type = inferred_type
+ return inferred_type
+
+
+class Uninitialized(object):
+ """Definitely not initialised yet."""
+
+
+class Unknown(object):
+ """Coming from outer closure, might be initialised or not."""
+
+
+class NameReference(object):
+ def __init__(self, node, entry):
+ if node.cf_state is None:
+ node.cf_state = set()
+ self.node = node
+ self.entry = entry
+ self.pos = node.pos
+
+ def __repr__(self):
+ return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
+
+
+class ControlFlowState(list):
+ # Keeps track of a node's entry assignments.
+ #
+ # cf_is_null [boolean] The value is definitely uninitialized
+ # cf_maybe_null [boolean] The value may be uninitialized
+ # is_single [boolean] There is only one assignment at this point
+
+ cf_maybe_null = False
+ cf_is_null = False
+ is_single = False
+
+ def __init__(self, state):
+ if Uninitialized in state:
+ state.discard(Uninitialized)
+ self.cf_maybe_null = True
+ if not state:
+ self.cf_is_null = True
+ elif Unknown in state:
+ state.discard(Unknown)
+ self.cf_maybe_null = True
+ else:
+ if len(state) == 1:
+ self.is_single = True
+ # XXX: Remove fake_rhs_expr
+ super(ControlFlowState, self).__init__(
+ [i for i in state if i.rhs is not fake_rhs_expr])
+
+ def one(self):
+ return self[0]
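+
+# Illustration of the flag mapping (hypothetical input states):
+#   {Uninitialized}          -> cf_is_null=True, cf_maybe_null=True
+#   {Uninitialized, <assmt>} -> cf_maybe_null=True only
+#   {<assmt>}                -> is_single=True, both null flags False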
+
+
+class GVContext(object):
+ """Graphviz subgraph object."""
+
+ def __init__(self):
+ self.blockids = {}
+ self.nextid = 0
+ self.children = []
+ self.sources = {}
+
+ def add(self, child):
+ self.children.append(child)
+
+ def nodeid(self, block):
+ if block not in self.blockids:
+ self.blockids[block] = 'block%d' % self.nextid
+ self.nextid += 1
+ return self.blockids[block]
+
+ def extract_sources(self, block):
+ if not block.positions:
+ return ''
+ start = min(block.positions)
+ stop = max(block.positions)
+ srcdescr = start[0]
+ if srcdescr not in self.sources:
+ self.sources[srcdescr] = list(srcdescr.get_lines())
+ lines = self.sources[srcdescr]
+ return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]])
+
+ def render(self, fp, name, annotate_defs=False):
+ """Render graphviz dot graph"""
+ fp.write('digraph %s {\n' % name)
+ fp.write(' node [shape=box];\n')
+ for child in self.children:
+ child.render(fp, self, annotate_defs)
+ fp.write('}\n')
+
+ def escape(self, text):
+ return text.replace('"', '\\"').replace('\n', '\\n')
+
+
+class GV(object):
+ """Graphviz DOT renderer."""
+
+ def __init__(self, name, flow):
+ self.name = name
+ self.flow = flow
+
+ def render(self, fp, ctx, annotate_defs=False):
+ fp.write(' subgraph %s {\n' % self.name)
+ for block in self.flow.blocks:
+ label = ctx.extract_sources(block)
+ if annotate_defs:
+ for stat in block.stats:
+ if isinstance(stat, NameAssignment):
+ label += '\n %s [%s %s]' % (
+ stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1])
+ elif isinstance(stat, NameReference):
+ if stat.entry:
+ label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1])
+ if not label:
+ label = 'empty'
+ pid = ctx.nodeid(block)
+ fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
+ for block in self.flow.blocks:
+ pid = ctx.nodeid(block)
+ for child in block.children:
+ fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
+ fp.write(' }\n')
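+
+ # The emitted text is plain DOT, e.g. for a hypothetical two-block
+ # function 'f':
+ #
+ #   subgraph f {
+ #     block0 [label="a = 1"];
+ #     block1 [label="return a"];
+ #     block0 -> block1;
+ #   }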
+
+
+class MessageCollection(object):
+ """Collect error/warnings messages first then sort"""
+ def __init__(self):
+ self.messages = set()
+
+ def error(self, pos, message):
+ self.messages.add((pos, True, message))
+
+ def warning(self, pos, message):
+ self.messages.add((pos, False, message))
+
+ def report(self):
+ for pos, is_error, message in sorted(self.messages):
+ if is_error:
+ error(pos, message)
+ else:
+ warning(pos, message, 2)
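+
+ # Sorting the (pos, is_error, message) tuples gives deterministic,
+ # source-ordered diagnostics regardless of block iteration order.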
+
+
+def check_definitions(flow, compiler_directives):
+ flow.initialize()
+ flow.reaching_definitions()
+
+ # Track down state
+ assignments = set()
+ # Node to entry map
+ references = {}
+ assmt_nodes = set()
+
+ for block in flow.blocks:
+ i_state = block.i_input
+ for stat in block.stats:
+ i_assmts = flow.assmts[stat.entry]
+ state = flow.map_one(i_state, stat.entry)
+ if isinstance(stat, NameAssignment):
+ stat.lhs.cf_state.update(state)
+ assmt_nodes.add(stat.lhs)
+ i_state = i_state & ~i_assmts.mask
+ if stat.is_deletion:
+ i_state |= i_assmts.bit
+ else:
+ i_state |= stat.bit
+ assignments.add(stat)
+ if stat.rhs is not fake_rhs_expr:
+ stat.entry.cf_assignments.append(stat)
+ elif isinstance(stat, NameReference):
+ references[stat.node] = stat.entry
+ stat.entry.cf_references.append(stat)
+ stat.node.cf_state.update(state)
+ ## if not stat.node.allow_null:
+ ## i_state &= ~i_assmts.bit
+ ## # after successful read, the state is known to be initialised
+ state.discard(Uninitialized)
+ state.discard(Unknown)
+ for assmt in state:
+ assmt.refs.add(stat)
+
+ # Check variable usage
+ warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
+ warn_unused_result = compiler_directives['warn.unused_result']
+ warn_unused = compiler_directives['warn.unused']
+ warn_unused_arg = compiler_directives['warn.unused_arg']
+
+ messages = MessageCollection()
+
+ # assignment hints
+ for node in assmt_nodes:
+ if Uninitialized in node.cf_state:
+ node.cf_maybe_null = True
+ if len(node.cf_state) == 1:
+ node.cf_is_null = True
+ else:
+ node.cf_is_null = False
+ elif Unknown in node.cf_state:
+ node.cf_maybe_null = True
+ else:
+ node.cf_is_null = False
+ node.cf_maybe_null = False
+
+ # Find uninitialized references and cf-hints
+ for node, entry in references.items():
+ if Uninitialized in node.cf_state:
+ node.cf_maybe_null = True
+ if not entry.from_closure and len(node.cf_state) == 1:
+ node.cf_is_null = True
+ if (node.allow_null or entry.from_closure
+ or entry.is_pyclass_attr or entry.type.is_error):
+ pass # Can be uninitialized here
+ elif node.cf_is_null:
+ if entry.error_on_uninitialized or (
+ Options.error_on_uninitialized and (
+ entry.type.is_pyobject or entry.type.is_unspecified)):
+ messages.error(
+ node.pos,
+ "local variable '%s' referenced before assignment"
+ % entry.name)
+ else:
+ messages.warning(
+ node.pos,
+ "local variable '%s' referenced before assignment"
+ % entry.name)
+ elif warn_maybe_uninitialized:
+ messages.warning(
+ node.pos,
+ "local variable '%s' might be referenced before assignment"
+ % entry.name)
+ elif Unknown in node.cf_state:
+ # TODO: better cross-closure analysis to know when inner functions
+ # are being called before a variable is being set, and when
+ # a variable is known to be set before even defining the
+ # inner function, etc.
+ node.cf_maybe_null = True
+ else:
+ node.cf_is_null = False
+ node.cf_maybe_null = False
+
+ # Unused result
+ for assmt in assignments:
+ if (not assmt.refs and not assmt.entry.is_pyclass_attr
+ and not assmt.entry.in_closure):
+ if assmt.entry.cf_references and warn_unused_result:
+ if assmt.is_arg:
+ messages.warning(assmt.pos, "Unused argument value '%s'" %
+ assmt.entry.name)
+ else:
+ messages.warning(assmt.pos, "Unused result in '%s'" %
+ assmt.entry.name)
+ assmt.lhs.cf_used = False
+
+ # Unused entries
+ for entry in flow.entries:
+ if (not entry.cf_references
+ and not entry.is_pyclass_attr):
+ if entry.name != '_' and not entry.name.startswith('unused'):
+ # '_' is often used for unused variables, e.g. in loops
+ if entry.is_arg:
+ if warn_unused_arg:
+ messages.warning(entry.pos, "Unused argument '%s'" %
+ entry.name)
+ else:
+ if warn_unused:
+ messages.warning(entry.pos, "Unused entry '%s'" %
+ entry.name)
+ entry.cf_used = False
+
+ messages.report()
+
+ for node in assmt_nodes:
+ node.cf_state = ControlFlowState(node.cf_state)
+ for node in references:
+ node.cf_state = ControlFlowState(node.cf_state)
+
+
+class AssignmentCollector(TreeVisitor):
+ def __init__(self):
+ super(AssignmentCollector, self).__init__()
+ self.assignments = []
+
+ def visit_Node(self, node):
+ self._visitchildren(node, None)
+
+ def visit_SingleAssignmentNode(self, node):
+ self.assignments.append((node.lhs, node.rhs))
+
+ def visit_CascadedAssignmentNode(self, node):
+ for lhs in node.lhs_list:
+ self.assignments.append((lhs, node.rhs))
+
+
+class ControlFlowAnalysis(CythonTransform):
+
+ def visit_ModuleNode(self, node):
+ self.gv_ctx = GVContext()
+ self.constant_folder = ConstantFolding()
+
+ # Set of NameNode reductions
+ self.reductions = set()
+
+ self.in_inplace_assignment = False
+ self.env_stack = []
+ self.env = node.scope
+ self.stack = []
+ self.flow = ControlFlow()
+ self.visitchildren(node)
+
+ check_definitions(self.flow, self.current_directives)
+
+ dot_output = self.current_directives['control_flow.dot_output']
+ if dot_output:
+ annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
+ fp = open(dot_output, 'wt')
+ try:
+ self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
+ finally:
+ fp.close()
+ return node
+
+ def visit_FuncDefNode(self, node):
+ for arg in node.args:
+ if arg.default:
+ self.visitchildren(arg)
+ self.visitchildren(node, ('decorators',))
+ self.env_stack.append(self.env)
+ self.env = node.local_scope
+ self.stack.append(self.flow)
+ self.flow = ControlFlow()
+
+ # Collect all entries
+ for entry in node.local_scope.entries.values():
+ if self.flow.is_tracked(entry):
+ self.flow.entries.add(entry)
+
+ self.mark_position(node)
+ # Function body block
+ self.flow.nextblock()
+
+ for arg in node.args:
+ self._visit(arg)
+ if node.star_arg:
+ self.flow.mark_argument(node.star_arg,
+ TypedExprNode(Builtin.tuple_type,
+ may_be_none=False),
+ node.star_arg.entry)
+ if node.starstar_arg:
+ self.flow.mark_argument(node.starstar_arg,
+ TypedExprNode(Builtin.dict_type,
+ may_be_none=False),
+ node.starstar_arg.entry)
+ self._visit(node.body)
+ # Workaround for generators
+ if node.is_generator:
+ self._visit(node.gbody.body)
+
+ # Exit point
+ if self.flow.block:
+ self.flow.block.add_child(self.flow.exit_point)
+
+ # Cleanup graph
+ self.flow.normalize()
+ check_definitions(self.flow, self.current_directives)
+ self.flow.blocks.add(self.flow.entry_point)
+
+ self.gv_ctx.add(GV(node.local_scope.name, self.flow))
+
+ self.flow = self.stack.pop()
+ self.env = self.env_stack.pop()
+ return node
+
+ def visit_DefNode(self, node):
+ node.used = True
+ return self.visit_FuncDefNode(node)
+
+ def visit_GeneratorBodyDefNode(self, node):
+ return node
+
+ def visit_CTypeDefNode(self, node):
+ return node
+
+ def mark_assignment(self, lhs, rhs=None):
+ if not self.flow.block:
+ return
+ if self.flow.exceptions:
+ exc_descr = self.flow.exceptions[-1]
+ self.flow.block.add_child(exc_descr.entry_point)
+ self.flow.nextblock()
+
+ if not rhs:
+ rhs = object_expr
+ if lhs.is_name:
+ if lhs.entry is not None:
+ entry = lhs.entry
+ else:
+ entry = self.env.lookup(lhs.name)
+ if entry is None: # TODO: This shouldn't happen...
+ return
+ self.flow.mark_assignment(lhs, rhs, entry)
+ elif lhs.is_sequence_constructor:
+ for i, arg in enumerate(lhs.args):
+ if not rhs or arg.is_starred:
+ item_node = None
+ else:
+ item_node = rhs.inferable_item_node(i)
+ self.mark_assignment(arg, item_node)
+ else:
+ self._visit(lhs)
+
+ if self.flow.exceptions:
+ exc_descr = self.flow.exceptions[-1]
+ self.flow.block.add_child(exc_descr.entry_point)
+ self.flow.nextblock()
+
+ def mark_position(self, node):
+ """Mark position if DOT output is enabled."""
+ if self.current_directives['control_flow.dot_output']:
+ self.flow.mark_position(node)
+
+ def visit_FromImportStatNode(self, node):
+ for name, target in node.items:
+ if name != "*":
+ self.mark_assignment(target)
+ self.visitchildren(node)
+ return node
+
+ def visit_AssignmentNode(self, node):
+ raise InternalError("Unhandled assignment node")
+
+ def visit_SingleAssignmentNode(self, node):
+ self._visit(node.rhs)
+ self.mark_assignment(node.lhs, node.rhs)
+ return node
+
+ def visit_CascadedAssignmentNode(self, node):
+ self._visit(node.rhs)
+ for lhs in node.lhs_list:
+ self.mark_assignment(lhs, node.rhs)
+ return node
+
+ def visit_ParallelAssignmentNode(self, node):
+ collector = AssignmentCollector()
+ collector.visitchildren(node)
+ for lhs, rhs in collector.assignments:
+ self._visit(rhs)
+ for lhs, rhs in collector.assignments:
+ self.mark_assignment(lhs, rhs)
+ return node
+
+ def visit_InPlaceAssignmentNode(self, node):
+ self.in_inplace_assignment = True
+ self.visitchildren(node)
+ self.in_inplace_assignment = False
+ self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node()))
+ return node
+
+ def visit_DelStatNode(self, node):
+ for arg in node.args:
+ if arg.is_name:
+ entry = arg.entry or self.env.lookup(arg.name)
+ if entry.in_closure or entry.from_closure:
+ error(arg.pos,
+ "can not delete variable '%s' "
+ "referenced in nested scope" % entry.name)
+ if not node.ignore_nonexisting:
+ self._visit(arg) # mark reference
+ self.flow.mark_deletion(arg, entry)
+ else:
+ self._visit(arg)
+ return node
+
+ def visit_CArgDeclNode(self, node):
+ entry = self.env.lookup(node.name)
+ if entry:
+ may_be_none = not node.not_none
+ self.flow.mark_argument(
+ node, TypedExprNode(entry.type, may_be_none), entry)
+ return node
+
+ def visit_NameNode(self, node):
+ if self.flow.block:
+ entry = node.entry or self.env.lookup(node.name)
+ if entry:
+ self.flow.mark_reference(node, entry)
+
+ if entry in self.reductions and not self.in_inplace_assignment:
+ error(node.pos,
+ "Cannot read reduction variable in loop body")
+
+ return node
+
+ def visit_StatListNode(self, node):
+ if self.flow.block:
+ for stat in node.stats:
+ self._visit(stat)
+ if not self.flow.block:
+ stat.is_terminator = True
+ break
+ return node
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ self.mark_position(node)
+ return node
+
+ def visit_SizeofVarNode(self, node):
+ return node
+
+ def visit_TypeidNode(self, node):
+ return node
+
+ def visit_IfStatNode(self, node):
+ next_block = self.flow.newblock()
+ parent = self.flow.block
+ # If clauses
+ for clause in node.if_clauses:
+ parent = self.flow.nextblock(parent)
+ self._visit(clause.condition)
+ self.flow.nextblock()
+ self._visit(clause.body)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=parent)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ parent.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_WhileStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition block
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ if node.condition:
+ self._visit(node.condition)
+ # Body block
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ self.flow.block.add_child(next_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def mark_forloop_target(self, node):
+ # TODO: Remove redundancy with range optimization...
+ is_special = False
+ sequence = node.iterator.sequence
+ target = node.target
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = self.env.lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name == 'reversed' and len(sequence.args) == 1:
+ sequence = sequence.args[0]
+ elif function.name == 'enumerate' and len(sequence.args) == 1:
+ if target.is_sequence_constructor and len(target.args) == 2:
+ iterator = sequence.args[0]
+ if iterator.is_name:
+ iterator_type = iterator.infer_type(self.env)
+ if iterator_type.is_builtin_type:
+ # assume that builtin types have a length within Py_ssize_t
+ self.mark_assignment(
+ target.args[0],
+ ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type))
+ target = target.args[1]
+ sequence = sequence.args[0]
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = self.env.lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name in ('range', 'xrange'):
+ is_special = True
+ for arg in sequence.args[:2]:
+ self.mark_assignment(target, arg)
+ if len(sequence.args) > 2:
+ self.mark_assignment(target, self.constant_folder(
+ ExprNodes.binop_node(node.pos,
+ '+',
+ sequence.args[0],
+ sequence.args[2])))
+
+ if not is_special:
+ # A for-loop basically translates to subsequent calls to
+ # __getitem__(), so using an IndexNode here allows us to
+ # naturally infer the base type of pointers, C arrays,
+ # Python strings, etc., while correctly falling back to an
+ # object type when the base type cannot be handled.
+
+ self.mark_assignment(target, node.item)
+
+ def visit_AsyncForStatNode(self, node):
+ return self.visit_ForInStatNode(node)
+
+ def visit_ForInStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition with iterator
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ self._visit(node.iterator)
+ # Target assignment
+ self.flow.nextblock()
+
+ if isinstance(node, Nodes.ForInStatNode):
+ self.mark_forloop_target(node)
+ elif isinstance(node, Nodes.AsyncForStatNode):
+ # not entirely correct, but good enough for now
+ self.mark_assignment(node.target, node.item)
+ else: # Parallel
+ self.mark_assignment(node.target)
+
+ # Body block
+ if isinstance(node, Nodes.ParallelRangeNode):
+ # In case of an invalid loop range, the privates are never assigned,
+ # so mark them as deleted (uninitialised) up front.
+ self._delete_privates(node, exclude=node.target.entry)
+
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def _delete_privates(self, node, exclude=None):
+ for private_node in node.assigned_nodes:
+ if not exclude or private_node.entry is not exclude:
+ self.flow.mark_deletion(private_node, private_node.entry)
+
+ def visit_ParallelRangeNode(self, node):
+ reductions = self.reductions
+
+ # if node.target is None or not a NameNode, an error will already
+ # have been issued
+ if hasattr(node.target, 'entry'):
+ self.reductions = set(reductions)
+
+ for private_node in node.assigned_nodes:
+ private_node.entry.error_on_uninitialized = True
+ pos, reduction = node.assignments[private_node.entry]
+ if reduction:
+ self.reductions.add(private_node.entry)
+
+ node = self.visit_ForInStatNode(node)
+
+ self.reductions = reductions
+ return node
+
+ def visit_ParallelWithBlockNode(self, node):
+ for private_node in node.assigned_nodes:
+ private_node.entry.error_on_uninitialized = True
+
+ self._delete_privates(node)
+ self.visitchildren(node)
+ self._delete_privates(node)
+
+ return node
+
+ def visit_ForFromStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition with iterator
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ self._visit(node.bound1)
+ self._visit(node.bound2)
+ if node.step is not None:
+ self._visit(node.step)
+ # Target assignment
+ self.flow.nextblock()
+ self.mark_assignment(node.target, node.bound1)
+ if node.step is not None:
+ self.mark_assignment(node.target, self.constant_folder(
+ ExprNodes.binop_node(node.pos, '+', node.bound1, node.step)))
+ # Body block
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_LoopNode(self, node):
+ raise InternalError("Generic loops are not supported")
+
+ def visit_WithTargetAssignmentStatNode(self, node):
+ self.mark_assignment(node.lhs, node.with_node.enter_call)
+ return node
+
+ def visit_WithStatNode(self, node):
+ self._visit(node.manager)
+ self._visit(node.enter_call)
+ self._visit(node.body)
+ return node
+
+ def visit_TryExceptStatNode(self, node):
+ # After exception handling
+ next_block = self.flow.newblock()
+ # Body block
+ self.flow.newblock()
+ # Exception entry point
+ entry_point = self.flow.newblock()
+ self.flow.exceptions.append(ExceptionDescr(entry_point))
+ self.flow.nextblock()
+ ## XXX: links to the exception handling point should be added by
+ ## XXX: child nodes
+ self.flow.block.add_child(entry_point)
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.exceptions.pop()
+
+ # After exception
+ if self.flow.block:
+ if node.else_clause:
+ self.flow.nextblock()
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+
+ for clause in node.except_clauses:
+ self.flow.block = entry_point
+ if clause.pattern:
+ for pattern in clause.pattern:
+ self._visit(pattern)
+ else:
+ # TODO: handle * pattern
+ pass
+ entry_point = self.flow.newblock(parent=self.flow.block)
+ self.flow.nextblock()
+ if clause.target:
+ self.mark_assignment(clause.target)
+ self._visit(clause.body)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+
+ if self.flow.exceptions:
+ entry_point.add_child(self.flow.exceptions[-1].entry_point)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_TryFinallyStatNode(self, node):
+ body_block = self.flow.nextblock()
+
+ # Exception entry point
+ entry_point = self.flow.newblock()
+ self.flow.block = entry_point
+ self._visit(node.finally_except_clause)
+
+ if self.flow.block and self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+
+ # Normal execution
+ finally_enter = self.flow.newblock()
+ self.flow.block = finally_enter
+ self._visit(node.finally_clause)
+ finally_exit = self.flow.block
+
+ descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
+ self.flow.exceptions.append(descr)
+ if self.flow.loops:
+ self.flow.loops[-1].exceptions.append(descr)
+ self.flow.block = body_block
+ body_block.add_child(entry_point)
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.exceptions.pop()
+ if self.flow.loops:
+ self.flow.loops[-1].exceptions.pop()
+
+ if self.flow.block:
+ self.flow.block.add_child(finally_enter)
+ if finally_exit:
+ self.flow.block = self.flow.nextblock(parent=finally_exit)
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_RaiseStatNode(self, node):
+ self.mark_position(node)
+ self.visitchildren(node)
+ if self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+ self.flow.block = None
+ return node
+
+ def visit_ReraiseStatNode(self, node):
+ self.mark_position(node)
+ if self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+ self.flow.block = None
+ return node
+
+ def visit_ReturnStatNode(self, node):
+ self.mark_position(node)
+ self.visitchildren(node)
+
+ outer_exception_handlers = iter(self.flow.exceptions[::-1])
+ for handler in outer_exception_handlers:
+ if handler.finally_enter:
+ self.flow.block.add_child(handler.finally_enter)
+ if handler.finally_exit:
+ # 'return' goes to function exit, or to the next outer 'finally' clause
+ exit_point = self.flow.exit_point
+ for next_handler in outer_exception_handlers:
+ if next_handler.finally_enter:
+ exit_point = next_handler.finally_enter
+ break
+ handler.finally_exit.add_child(exit_point)
+ break
+ else:
+ if self.flow.block:
+ self.flow.block.add_child(self.flow.exit_point)
+ self.flow.block = None
+ return node
+
+ def visit_BreakStatNode(self, node):
+ if not self.flow.loops:
+ #error(node.pos, "break statement not inside loop")
+ return node
+ loop = self.flow.loops[-1]
+ self.mark_position(node)
+ for exception in loop.exceptions[::-1]:
+ if exception.finally_enter:
+ self.flow.block.add_child(exception.finally_enter)
+ if exception.finally_exit:
+ exception.finally_exit.add_child(loop.next_block)
+ break
+ else:
+ self.flow.block.add_child(loop.next_block)
+ self.flow.block = None
+ return node
+
+ def visit_ContinueStatNode(self, node):
+ if not self.flow.loops:
+ #error(node.pos, "continue statement not inside loop")
+ return node
+ loop = self.flow.loops[-1]
+ self.mark_position(node)
+ for exception in loop.exceptions[::-1]:
+ if exception.finally_enter:
+ self.flow.block.add_child(exception.finally_enter)
+ if exception.finally_exit:
+ exception.finally_exit.add_child(loop.loop_block)
+ break
+ else:
+ self.flow.block.add_child(loop.loop_block)
+ self.flow.block = None
+ return node
+
+ def visit_ComprehensionNode(self, node):
+ if node.expr_scope:
+ self.env_stack.append(self.env)
+ self.env = node.expr_scope
+ # Skip append node here
+ self._visit(node.loop)
+ if node.expr_scope:
+ self.env = self.env_stack.pop()
+ return node
+
+ def visit_ScopedExprNode(self, node):
+ if node.expr_scope:
+ self.env_stack.append(self.env)
+ self.env = node.expr_scope
+ self.visitchildren(node)
+ if node.expr_scope:
+ self.env = self.env_stack.pop()
+ return node
+
+ def visit_PyClassDefNode(self, node):
+ self.visitchildren(node, ('dict', 'metaclass',
+ 'mkw', 'bases', 'class_result'))
+ self.flow.mark_assignment(node.target, node.classobj,
+ self.env.lookup(node.name))
+ self.env_stack.append(self.env)
+ self.env = node.scope
+ self.flow.nextblock()
+ self.visitchildren(node, ('body',))
+ self.flow.nextblock()
+ self.env = self.env_stack.pop()
+ return node
+
+ def visit_AmpersandNode(self, node):
+ if node.operand.is_name:
+ # Fake assignment to silence warning
+ self.mark_assignment(node.operand, fake_rhs_expr)
+ self.visitchildren(node)
+ return node
diff --git a/contrib/tools/cython/Cython/Compiler/FusedNode.py b/contrib/tools/cython/Cython/Compiler/FusedNode.py
new file mode 100644
index 0000000000..26d6ffd3d6
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/FusedNode.py
@@ -0,0 +1,901 @@
+from __future__ import absolute_import
+
+import copy
+
+from . import (ExprNodes, PyrexTypes, MemoryView,
+ ParseTreeTransforms, StringEncoding, Errors)
+from .ExprNodes import CloneNode, ProxyNode, TupleNode
+from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
+from ..Utils import OrderedSet
+
+
+class FusedCFuncDefNode(StatListNode):
+ """
+ This node replaces a function with fused arguments. It deep-copies the
+ function for every permutation of fused types, and allocates a new local
+ scope for it. It keeps track of the original function in self.node, and
+ the entry of the original function in the symbol table is given the
+ 'fused_cfunction' attribute which points back to us.
+ Then, when a function lookup occurs (e.g. to call it), the call can be
+ dispatched to the right specialization.
+
+ node FuncDefNode the original function
+ nodes [FuncDefNode] list of copies of node with different specific types
+ py_func DefNode the fused python function subscriptable from
+ Python space
+ __signatures__ A DictNode mapping signature specialization strings
+ to PyCFunction nodes
+ resulting_fused_function PyCFunction for the fused DefNode that delegates
+ to specializations
+ fused_func_assignment Assignment of the fused function to the function name
+ defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
+ defaults would result in many different tuples)
+ specialized_pycfuncs List of synthesized pycfunction nodes for the
+ specializations
+ code_object CodeObjectNode shared by all specializations and the
+ fused function
+
+ fused_compound_types All fused (compound) types (e.g. floating[:])
+ """
+
+ __signatures__ = None
+ resulting_fused_function = None
+ fused_func_assignment = None
+ defaults_tuple = None
+ decorators = None
+
+ child_attrs = StatListNode.child_attrs + [
+ '__signatures__', 'resulting_fused_function', 'fused_func_assignment']
+
+ def __init__(self, node, env):
+ super(FusedCFuncDefNode, self).__init__(node.pos)
+
+ self.nodes = []
+ self.node = node
+
+ is_def = isinstance(self.node, DefNode)
+ if is_def:
+ # self.node.decorators = []
+ self.copy_def(env)
+ else:
+ self.copy_cdef(env)
+
+ # Perform some sanity checks. If anything fails, it's a bug
+ for n in self.nodes:
+ assert not n.entry.type.is_fused
+ assert not n.local_scope.return_type.is_fused
+ if node.return_type.is_fused:
+ assert not n.return_type.is_fused
+
+ if not is_def and n.cfunc_declarator.optional_arg_count:
+ assert n.type.op_arg_struct
+
+ node.entry.fused_cfunction = self
+ # Copy the nodes as AnalyseDeclarationsTransform will prepend
+ # self.py_func to self.stats, as we only want specialized
+ # CFuncDefNodes in self.nodes
+ self.stats = self.nodes[:]
+
+ def copy_def(self, env):
+ """
+ Create a copy of the original def or lambda function for specialized
+ versions.
+ """
+ fused_compound_types = PyrexTypes.unique(
+ [arg.type for arg in self.node.args if arg.type.is_fused])
+ fused_types = self._get_fused_base_types(fused_compound_types)
+ permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
+
+ self.fused_compound_types = fused_compound_types
+
+ if self.node.entry in env.pyfunc_entries:
+ env.pyfunc_entries.remove(self.node.entry)
+
+ for cname, fused_to_specific in permutations:
+ copied_node = copy.deepcopy(self.node)
+ # keep signature object identity for special casing in DefNode.analyse_declarations()
+ copied_node.entry.signature = self.node.entry.signature
+
+ self._specialize_function_args(copied_node.args, fused_to_specific)
+ copied_node.return_type = self.node.return_type.specialize(
+ fused_to_specific)
+
+ copied_node.analyse_declarations(env)
+ # copied_node.is_staticmethod = self.node.is_staticmethod
+ # copied_node.is_classmethod = self.node.is_classmethod
+ self.create_new_local_scope(copied_node, env, fused_to_specific)
+ self.specialize_copied_def(copied_node, cname, self.node.entry,
+ fused_to_specific, fused_compound_types)
+
+ PyrexTypes.specialize_entry(copied_node.entry, cname)
+ copied_node.entry.used = True
+ env.entries[copied_node.entry.name] = copied_node.entry
+
+ if not self.replace_fused_typechecks(copied_node):
+ break
+
+ self.orig_py_func = self.node
+ self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
+
+ def copy_cdef(self, env):
+ """
+ Create a copy of the original c(p)def function for all specialized
+ versions.
+ """
+ permutations = self.node.type.get_all_specialized_permutations()
+ # print 'Node %s has %d specializations:' % (self.node.entry.name,
+ # len(permutations))
+ # import pprint; pprint.pprint([d for cname, d in permutations])
+
+ # Prevent copying of the python function
+ self.orig_py_func = orig_py_func = self.node.py_func
+ self.node.py_func = None
+ if orig_py_func:
+ env.pyfunc_entries.remove(orig_py_func.entry)
+
+ fused_types = self.node.type.get_fused_types()
+ self.fused_compound_types = fused_types
+
+ new_cfunc_entries = []
+ for cname, fused_to_specific in permutations:
+ copied_node = copy.deepcopy(self.node)
+
+ # Make the types in our CFuncType specific.
+ type = copied_node.type.specialize(fused_to_specific)
+ entry = copied_node.entry
+ type.specialize_entry(entry, cname)
+
+ # Reuse existing Entries (e.g. from .pxd files).
+ for i, orig_entry in enumerate(env.cfunc_entries):
+ if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
+ copied_node.entry = env.cfunc_entries[i]
+ if not copied_node.entry.func_cname:
+ copied_node.entry.func_cname = entry.func_cname
+ entry = copied_node.entry
+ type = entry.type
+ break
+ else:
+ new_cfunc_entries.append(entry)
+
+ copied_node.type = type
+ entry.type, type.entry = type, entry
+
+ entry.used = (entry.used or
+ self.node.entry.defined_in_pxd or
+ env.is_c_class_scope or
+ entry.is_cmethod)
+
+ if self.node.cfunc_declarator.optional_arg_count:
+ self.node.cfunc_declarator.declare_optional_arg_struct(
+ type, env, fused_cname=cname)
+
+ copied_node.return_type = type.return_type
+ self.create_new_local_scope(copied_node, env, fused_to_specific)
+
+ # Make the argument types in the CFuncDeclarator specific
+ self._specialize_function_args(copied_node.cfunc_declarator.args,
+ fused_to_specific)
+
+ # If a cpdef, declare all specialized cpdefs (this
+ # also calls analyse_declarations)
+ copied_node.declare_cpdef_wrapper(env)
+ if copied_node.py_func:
+ env.pyfunc_entries.remove(copied_node.py_func.entry)
+
+ self.specialize_copied_def(
+ copied_node.py_func, cname, self.node.entry.as_variable,
+ fused_to_specific, fused_types)
+
+ if not self.replace_fused_typechecks(copied_node):
+ break
+
+ # replace old entry with new entries
+ try:
+ cindex = env.cfunc_entries.index(self.node.entry)
+ except ValueError:
+ env.cfunc_entries.extend(new_cfunc_entries)
+ else:
+ env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+
+ if orig_py_func:
+ self.py_func = self.make_fused_cpdef(orig_py_func, env,
+ is_def=False)
+ else:
+ self.py_func = orig_py_func
+
+ def _get_fused_base_types(self, fused_compound_types):
+ """
+ Get a list of unique basic fused types, from a list of
+ (possibly) compound fused types.
+ """
+ base_types = []
+ seen = set()
+ for fused_type in fused_compound_types:
+ fused_type.get_fused_types(result=base_types, seen=seen)
+ return base_types
+
+ def _specialize_function_args(self, args, fused_to_specific):
+ for arg in args:
+ if arg.type.is_fused:
+ arg.type = arg.type.specialize(fused_to_specific)
+ if arg.type.is_memoryviewslice:
+ arg.type.validate_memslice_dtype(arg.pos)
+
+ def create_new_local_scope(self, node, env, f2s):
+ """
+ Create a new local scope for the copied node and append it to
+ self.nodes. A new local scope is needed because the arguments with the
+ fused types are already in the local scope, and we need the specialized
+ entries created after analyse_declarations on each specialized version
+ of the (CFunc)DefNode.
+ f2s is a dict mapping each fused type to its specialized version
+ """
+ node.create_local_scope(env)
+ node.local_scope.fused_to_specific = f2s
+
+ # This is copied from the original function, set it to false to
+ # stop recursion
+ node.has_fused_arguments = False
+ self.nodes.append(node)
+
+ def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
+ """Specialize the copy of a DefNode given the copied node,
+ the specialization cname and the original DefNode entry"""
+ fused_types = self._get_fused_base_types(fused_compound_types)
+ type_strings = [
+ PyrexTypes.specialization_signature_string(fused_type, f2s)
+ for fused_type in fused_types
+ ]
+
+ node.specialized_signature_string = '|'.join(type_strings)
+
+ node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
+ cname, node.entry.pymethdef_cname)
+ node.entry.doc = py_entry.doc
+ node.entry.doc_cname = py_entry.doc_cname
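+
+ # e.g. with two fused base types specialized to 'double' and 'long',
+ # the signature string would be "double|long" (illustration only).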
+
+ def replace_fused_typechecks(self, copied_node):
+ """
+ Branch-prune fused type checks like
+
+ if fused_t is int:
+ ...
+
+ Returns False if an error was issued, indicating that we should stop
+ in order to prevent a flood of errors.
+ """
+ num_errors = Errors.num_errors
+ transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
+ copied_node.local_scope)
+ transform(copied_node)
+
+ if Errors.num_errors > num_errors:
+ return False
+
+ return True
+
+ def _fused_instance_checks(self, normal_types, pyx_code, env):
+ """
+ Generate Cython code for instance checks, matching an object to
+ specialized types.
+ """
+ for specialized_type in normal_types:
+ # all_numeric = all_numeric and specialized_type.is_numeric
+ pyx_code.context.update(
+ py_type_name=specialized_type.py_type_name(),
+ specialized_type_name=specialized_type.specialization_string,
+ )
+ pyx_code.put_chunk(
+ u"""
+ if isinstance(arg, {{py_type_name}}):
+ dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break
+ """)
+
+ def _dtype_name(self, dtype):
+ if dtype.is_typedef:
+ return '___pyx_%s' % dtype
+ return str(dtype).replace(' ', '_')
+
+ def _dtype_type(self, dtype):
+ if dtype.is_typedef:
+ return self._dtype_name(dtype)
+ return str(dtype)
+
+ def _sizeof_dtype(self, dtype):
+ if dtype.is_pyobject:
+ return 'sizeof(void *)'
+ else:
+ return "sizeof(%s)" % self._dtype_type(dtype)
+
+ def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
+ "Setup some common cases to match dtypes against specializations"
+ if pyx_code.indenter("if kind in b'iu':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_int")
+ pyx_code.dedent()
+
+ if pyx_code.indenter("elif kind == b'f':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_float")
+ pyx_code.dedent()
+
+ if pyx_code.indenter("elif kind == b'c':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_complex")
+ pyx_code.dedent()
+
+ if pyx_code.indenter("elif kind == b'O':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_object")
+ pyx_code.dedent()
+
+ match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
+ no_match = "dest_sig[{{dest_sig_idx}}] = None"
+ def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
+ """
+ Match a numpy dtype object to the individual specializations.
+ """
+ self._buffer_check_numpy_dtype_setup_cases(pyx_code)
+
+ for specialized_type in pythran_types+specialized_buffer_types:
+ final_type = specialized_type
+ if specialized_type.is_pythran_expr:
+ specialized_type = specialized_type.org_buffer
+ dtype = specialized_type.dtype
+ pyx_code.context.update(
+ itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
+ signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
+ dtype=dtype,
+ specialized_type_name=final_type.specialization_string)
+
+ dtypes = [
+ (dtype.is_int, pyx_code.dtype_int),
+ (dtype.is_float, pyx_code.dtype_float),
+ (dtype.is_complex, pyx_code.dtype_complex)
+ ]
+
+ for dtype_category, codewriter in dtypes:
+ if dtype_category:
+ cond = '{{itemsize_match}} and (<Py_ssize_t>arg.ndim) == %d' % (
+ specialized_type.ndim,)
+ if dtype.is_int:
+ cond += ' and {{signed_match}}'
+
+ if final_type.is_pythran_expr:
+ cond += ' and arg_is_pythran_compatible'
+
+ if codewriter.indenter("if %s:" % cond):
+ #codewriter.putln("print 'buffer match found based on numpy dtype'")
+ codewriter.putln(self.match)
+ codewriter.putln("break")
+ codewriter.dedent()
+
+ def _buffer_parse_format_string_check(self, pyx_code, decl_code,
+ specialized_type, env):
+ """
+ For each specialized type, try to coerce the object to a memoryview
+ slice of that type. This means obtaining a buffer and parsing the
+ format string.
+ TODO: separate buffer acquisition from format parsing
+ """
+ dtype = specialized_type.dtype
+ if specialized_type.is_buffer:
+ axes = [('direct', 'strided')] * specialized_type.ndim
+ else:
+ axes = specialized_type.axes
+
+ memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
+ memslice_type.create_from_py_utility_code(env)
+ pyx_code.context.update(
+ coerce_from_py_func=memslice_type.from_py_function,
+ dtype=dtype)
+ decl_code.putln(
+ "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
+
+ pyx_code.context.update(
+ specialized_type_name=specialized_type.specialization_string,
+ sizeof_dtype=self._sizeof_dtype(dtype))
+
+ pyx_code.put_chunk(
+ u"""
+ # try {{dtype}}
+ if itemsize == -1 or itemsize == {{sizeof_dtype}}:
+ memslice = {{coerce_from_py_func}}(arg, 0)
+ if memslice.memview:
+ __PYX_XDEC_MEMVIEW(&memslice, 1)
+ # print 'found a match for the buffer through format parsing'
+ %s
+ break
+ else:
+ __pyx_PyErr_Clear()
+ """ % self.match)
+
+ def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
+ """
+ Generate Cython code to match objects to buffer specializations.
+ First try to get a numpy dtype object and match it against the individual
+ specializations. If that fails, try naively to coerce the object
+ to each specialization, which obtains the buffer each time and tries
+ to match the format string.
+ """
+ # The first check that finds a match breaks out of the loop.
+ pyx_code.put_chunk(
+ u"""
+ """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
+ if ndarray is not None:
+ if isinstance(arg, ndarray):
+ dtype = arg.dtype
+ """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
+ elif __pyx_memoryview_check(arg):
+ arg_base = arg.base
+ if isinstance(arg_base, ndarray):
+ dtype = arg_base.dtype
+ else:
+ dtype = None
+ else:
+ dtype = None
+
+ itemsize = -1
+ if dtype is not None:
+ itemsize = dtype.itemsize
+ kind = ord(dtype.kind)
+ dtype_signed = kind == 'i'
+ """)
+ pyx_code.indent(2)
+ if pythran_types:
+ pyx_code.put_chunk(
+ u"""
+ # Pythran only supports the endianness of the current compiler
+ byteorder = dtype.byteorder
+ if byteorder == "<" and not __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ elif byteorder == ">" and __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ if arg_is_pythran_compatible:
+ cur_stride = itemsize
+ shape = arg.shape
+ strides = arg.strides
+ for i in range(arg.ndim-1, -1, -1):
+ if (<Py_ssize_t>strides[i]) != cur_stride:
+ arg_is_pythran_compatible = False
+ break
+ cur_stride *= <Py_ssize_t> shape[i]
+ else:
+ arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
+ """)
+ pyx_code.named_insertion_point("numpy_dtype_checks")
+ self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
+ pyx_code.dedent(2)
+
+ for specialized_type in buffer_types:
+ self._buffer_parse_format_string_check(
+ pyx_code, decl_code, specialized_type, env)
+
+ def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
+ """
+ If we have any buffer specializations, write out some variable
+ declarations and imports.
+ """
+ decl_code.put_chunk(
+ u"""
+ ctypedef struct {{memviewslice_cname}}:
+ void *memview
+
+ void __PYX_XDEC_MEMVIEW({{memviewslice_cname}} *, int have_gil)
+ bint __pyx_memoryview_check(object)
+ """)
+
+ pyx_code.local_variable_declarations.put_chunk(
+ u"""
+ cdef {{memviewslice_cname}} memslice
+ cdef Py_ssize_t itemsize
+ cdef bint dtype_signed
+ cdef char kind
+
+ itemsize = -1
+ """)
+
+ if pythran_types:
+ pyx_code.local_variable_declarations.put_chunk(u"""
+ cdef bint arg_is_pythran_compatible
+ cdef Py_ssize_t cur_stride
+ """)
+
+ pyx_code.imports.put_chunk(
+ u"""
+ cdef type ndarray
+ ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
+ """)
+
+ seen_typedefs = set()
+ seen_int_dtypes = set()
+ for buffer_type in all_buffer_types:
+ dtype = buffer_type.dtype
+ dtype_name = self._dtype_name(dtype)
+ if dtype.is_typedef:
+ if dtype_name not in seen_typedefs:
+ seen_typedefs.add(dtype_name)
+ decl_code.putln(
+ 'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name,
+ dtype.empty_declaration_code()))
+
+ if buffer_type.dtype.is_int:
+ if str(dtype) not in seen_int_dtypes:
+ seen_int_dtypes.add(str(dtype))
+ pyx_code.context.update(dtype_name=dtype_name,
+ dtype_type=self._dtype_type(dtype))
+ pyx_code.local_variable_declarations.put_chunk(
+ u"""
+ cdef bint {{dtype_name}}_is_signed
+ {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
+ """)
+
+ def _split_fused_types(self, arg):
+ """
+ Specialize fused types and split into normal types and buffer types.
+ """
+ specialized_types = PyrexTypes.get_specialized_types(arg.type)
+
+ # Prefer long over int, etc. by sorting (see type classes in PyrexTypes.py)
+ specialized_types.sort()
+
+ seen_py_type_names = set()
+ normal_types, buffer_types, pythran_types = [], [], []
+ has_object_fallback = False
+ for specialized_type in specialized_types:
+ py_type_name = specialized_type.py_type_name()
+ if py_type_name:
+ if py_type_name in seen_py_type_names:
+ continue
+ seen_py_type_names.add(py_type_name)
+ if py_type_name == 'object':
+ has_object_fallback = True
+ else:
+ normal_types.append(specialized_type)
+ elif specialized_type.is_pythran_expr:
+ pythran_types.append(specialized_type)
+ elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
+ buffer_types.append(specialized_type)
+
+ return normal_types, buffer_types, pythran_types, has_object_fallback
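+
+ # Sketch (hypothetical fused type over int, object and float[:]): the
+ # int specialization lands in normal_types, object sets
+ # has_object_fallback, and the float[:] memoryview specialization
+ # lands in buffer_types.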
+
+ def _unpack_argument(self, pyx_code):
+ pyx_code.put_chunk(
+ u"""
+ # PROCESSING ARGUMENT {{arg_tuple_idx}}
+ if {{arg_tuple_idx}} < len(<tuple>args):
+ arg = (<tuple>args)[{{arg_tuple_idx}}]
+ elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
+ arg = (<dict>kwargs)['{{arg.name}}']
+ else:
+ {{if arg.default}}
+ arg = (<tuple>defaults)[{{default_idx}}]
+ {{else}}
+ {{if arg_tuple_idx < min_positional_args}}
+ raise TypeError("Expected at least %d argument%s, got %d" % (
+ {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
+ {{else}}
+ raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}")
+ {{endif}}
+ {{endif}}
+ """)
+
+ def make_fused_cpdef(self, orig_py_func, env, is_def):
+ """
+ This creates the function that is indexable from Python and does
+ runtime dispatch based on the argument types. The function gets the
+ arg tuple and kwargs dict (or None) and the defaults tuple
+ as arguments from the Binding Fused Function's tp_call.
+ """
+ from . import TreeFragment, Code, UtilityCode
+
+ fused_types = self._get_fused_base_types([
+ arg.type for arg in self.node.args if arg.type.is_fused])
+
+ context = {
+ 'memviewslice_cname': MemoryView.memviewslice_cname,
+ 'func_args': self.node.args,
+ 'n_fused': len(fused_types),
+ 'min_positional_args':
+ self.node.num_required_args - self.node.num_required_kw_args
+ if is_def else
+ sum(1 for arg in self.node.args if arg.default is None),
+ 'name': orig_py_func.entry.name,
+ }
+
+ pyx_code = Code.PyxCodeWriter(context=context)
+ decl_code = Code.PyxCodeWriter(context=context)
+ decl_code.put_chunk(
+ u"""
+ cdef extern from *:
+ void __pyx_PyErr_Clear "PyErr_Clear" ()
+ type __Pyx_ImportNumPyArrayTypeIfAvailable()
+ int __Pyx_Is_Little_Endian()
+ """)
+ decl_code.indent()
+
+ pyx_code.put_chunk(
+ u"""
+ def __pyx_fused_cpdef(signatures, args, kwargs, defaults):
+ # FIXME: use a typed signature - currently fails badly because
+ # default arguments inherit the types we specify here!
+
+ dest_sig = [None] * {{n_fused}}
+
+ if kwargs is not None and not kwargs:
+ kwargs = None
+
+ cdef Py_ssize_t i
+
+ # instance check body
+ """)
+
+ pyx_code.indent() # indent following code to function body
+ pyx_code.named_insertion_point("imports")
+ pyx_code.named_insertion_point("func_defs")
+ pyx_code.named_insertion_point("local_variable_declarations")
+
+ fused_index = 0
+ default_idx = 0
+ all_buffer_types = OrderedSet()
+ seen_fused_types = set()
+ for i, arg in enumerate(self.node.args):
+ if arg.type.is_fused:
+ arg_fused_types = arg.type.get_fused_types()
+ if len(arg_fused_types) > 1:
+ raise NotImplementedError("Determination of more than one fused base "
+ "type per argument is not implemented.")
+ fused_type = arg_fused_types[0]
+
+ if arg.type.is_fused and fused_type not in seen_fused_types:
+ seen_fused_types.add(fused_type)
+
+ context.update(
+ arg_tuple_idx=i,
+ arg=arg,
+ dest_sig_idx=fused_index,
+ default_idx=default_idx,
+ )
+
+ normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
+ self._unpack_argument(pyx_code)
+
+ # 'unrolled' loop, first match breaks out of it
+ if pyx_code.indenter("while 1:"):
+ if normal_types:
+ self._fused_instance_checks(normal_types, pyx_code, env)
+ if buffer_types or pythran_types:
+ env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
+ self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
+ if has_object_fallback:
+ pyx_code.context.update(specialized_type_name='object')
+ pyx_code.putln(self.match)
+ else:
+ pyx_code.putln(self.no_match)
+ pyx_code.putln("break")
+ pyx_code.dedent()
+
+ fused_index += 1
+ all_buffer_types.update(buffer_types)
+ all_buffer_types.update(ty.org_buffer for ty in pythran_types)
+
+ if arg.default:
+ default_idx += 1
+
+ if all_buffer_types:
+ self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
+ env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
+ env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
+
+ pyx_code.put_chunk(
+ u"""
+ candidates = []
+ for sig in <dict>signatures:
+ match_found = False
+ src_sig = sig.strip('()').split('|')
+ for i in range(len(dest_sig)):
+ dst_type = dest_sig[i]
+ if dst_type is not None:
+ if src_sig[i] == dst_type:
+ match_found = True
+ else:
+ match_found = False
+ break
+
+ if match_found:
+ candidates.append(sig)
+
+ if not candidates:
+ raise TypeError("No matching signature found")
+ elif len(candidates) > 1:
+ raise TypeError("Function call with ambiguous argument types")
+ else:
+ return (<dict>signatures)[candidates[0]]
+ """)
+
+ fragment_code = pyx_code.getvalue()
+ # print decl_code.getvalue()
+ # print fragment_code
+ from .Optimize import ConstantFolding
+ fragment = TreeFragment.TreeFragment(
+ fragment_code, level='module', pipeline=[ConstantFolding()])
+ ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
+ UtilityCode.declare_declarations_in_scope(
+ decl_code.getvalue(), env.global_scope())
+ ast.scope = env
+ # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
+ ast.analyse_declarations(env)
+ py_func = ast.stats[-1] # the DefNode
+ self.fragment_scope = ast.scope
+
+ if isinstance(self.node, DefNode):
+ py_func.specialized_cpdefs = self.nodes[:]
+ else:
+ py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
+
+ return py_func
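+
+        # Illustration (hypothetical example): for "def f(fused_t x)" with
+        # fused_t covering int and double, the generated __pyx_fused_cpdef
+        # receives a signatures dict keyed by strings such as "int" and
+        # "double" (several fused types are joined as "(t1|t2)"), matches
+        # the resolved dest_sig against each key, and returns the single
+        # matching specialization.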
+
+ def update_fused_defnode_entry(self, env):
+ copy_attributes = (
+ 'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
+ 'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
+ 'scope'
+ )
+
+ entry = self.py_func.entry
+
+ for attr in copy_attributes:
+ setattr(entry, attr,
+ getattr(self.orig_py_func.entry, attr))
+
+ self.py_func.name = self.orig_py_func.name
+ self.py_func.doc = self.orig_py_func.doc
+
+ env.entries.pop('__pyx_fused_cpdef', None)
+ if isinstance(self.node, DefNode):
+ env.entries[entry.name] = entry
+ else:
+ env.entries[entry.name].as_variable = entry
+
+ env.pyfunc_entries.append(entry)
+
+ self.py_func.entry.fused_cfunction = self
+ for node in self.nodes:
+ if isinstance(self.node, DefNode):
+ node.fused_py_func = self.py_func
+ else:
+ node.py_func.fused_py_func = self.py_func
+ node.entry.as_variable = entry
+
+ self.synthesize_defnodes()
+ self.stats.append(self.__signatures__)
+
+ def analyse_expressions(self, env):
+ """
+ Analyse the expressions. Take care to only evaluate default arguments
+        once and clone the result for all specializations.
+ """
+ for fused_compound_type in self.fused_compound_types:
+ for fused_type in fused_compound_type.get_fused_types():
+ for specialization_type in fused_type.types:
+ if specialization_type.is_complex:
+ specialization_type.create_declaration_utility_code(env)
+
+ if self.py_func:
+ self.__signatures__ = self.__signatures__.analyse_expressions(env)
+ self.py_func = self.py_func.analyse_expressions(env)
+ self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
+ self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)
+
+ self.defaults = defaults = []
+
+ for arg in self.node.args:
+ if arg.default:
+ arg.default = arg.default.analyse_expressions(env)
+ defaults.append(ProxyNode(arg.default))
+ else:
+ defaults.append(None)
+
+ for i, stat in enumerate(self.stats):
+ stat = self.stats[i] = stat.analyse_expressions(env)
+ if isinstance(stat, FuncDefNode):
+ for arg, default in zip(stat.args, defaults):
+ if default is not None:
+ arg.default = CloneNode(default).coerce_to(arg.type, env)
+
+ if self.py_func:
+ args = [CloneNode(default) for default in defaults if default]
+ self.defaults_tuple = TupleNode(self.pos, args=args)
+ self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
+ self.defaults_tuple = ProxyNode(self.defaults_tuple)
+ self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)
+
+ fused_func = self.resulting_fused_function.arg
+ fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
+ fused_func.code_object = CloneNode(self.code_object)
+
+ for i, pycfunc in enumerate(self.specialized_pycfuncs):
+ pycfunc.code_object = CloneNode(self.code_object)
+ pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
+ pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
+ return self
+
+ def synthesize_defnodes(self):
+ """
+ Create the __signatures__ dict of PyCFunctionNode specializations.
+ """
+ if isinstance(self.nodes[0], CFuncDefNode):
+ nodes = [node.py_func for node in self.nodes]
+ else:
+ nodes = self.nodes
+
+ signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
+ for node in nodes]
+ keys = [ExprNodes.StringNode(node.pos, value=sig)
+ for node, sig in zip(nodes, signatures)]
+ values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
+ for node in nodes]
+
+ self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))
+
+ self.specialized_pycfuncs = values
+ for pycfuncnode in values:
+ pycfuncnode.is_specialization = True
+
+ def generate_function_definitions(self, env, code):
+ if self.py_func:
+ self.py_func.pymethdef_required = True
+ self.fused_func_assignment.generate_function_definitions(env, code)
+
+ for stat in self.stats:
+ if isinstance(stat, FuncDefNode) and stat.entry.used:
+ code.mark_pos(stat.pos)
+ stat.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+        # Note: all def function specializations are wrapped in PyCFunction
+ # nodes in the self.__signatures__ dictnode.
+ for default in self.defaults:
+ if default is not None:
+ default.generate_evaluation_code(code)
+
+ if self.py_func:
+ self.defaults_tuple.generate_evaluation_code(code)
+ self.code_object.generate_evaluation_code(code)
+
+ for stat in self.stats:
+ code.mark_pos(stat.pos)
+ if isinstance(stat, ExprNodes.ExprNode):
+ stat.generate_evaluation_code(code)
+ else:
+ stat.generate_execution_code(code)
+
+ if self.__signatures__:
+ self.resulting_fused_function.generate_evaluation_code(code)
+
+ code.putln(
+ "((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
+ (self.resulting_fused_function.result(),
+ self.__signatures__.result()))
+ code.put_giveref(self.__signatures__.result())
+ self.__signatures__.generate_post_assignment_code(code)
+ self.__signatures__.free_temps(code)
+
+ self.fused_func_assignment.generate_execution_code(code)
+
+ # Dispose of results
+ self.resulting_fused_function.generate_disposal_code(code)
+ self.resulting_fused_function.free_temps(code)
+ self.defaults_tuple.generate_disposal_code(code)
+ self.defaults_tuple.free_temps(code)
+ self.code_object.generate_disposal_code(code)
+ self.code_object.free_temps(code)
+
+ for default in self.defaults:
+ if default is not None:
+ default.generate_disposal_code(code)
+ default.free_temps(code)
+
+ def annotate(self, code):
+ for stat in self.stats:
+ stat.annotate(code)
diff --git a/contrib/tools/cython/Cython/Compiler/Future.py b/contrib/tools/cython/Cython/Compiler/Future.py
new file mode 100644
index 0000000000..848792e00b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Future.py
@@ -0,0 +1,15 @@
+def _get_feature(name):
+ import __future__
+ # fall back to a unique fake object for earlier Python versions or Python 3
+ return getattr(__future__, name, object())
+
+unicode_literals = _get_feature("unicode_literals")
+with_statement = _get_feature("with_statement") # dummy
+division = _get_feature("division")
+print_function = _get_feature("print_function")
+absolute_import = _get_feature("absolute_import")
+nested_scopes = _get_feature("nested_scopes") # dummy
+generators = _get_feature("generators") # dummy
+generator_stop = _get_feature("generator_stop")
+
+del _get_feature
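+
+# Illustrative check (doctest-style sketch): on interpreters that implement
+# a feature, the lookup above returns the real __future__ object; otherwise
+# it returns a unique dummy that only compares equal to itself.
+#
+#   >>> import __future__
+#   >>> division is __future__.division
+#   True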
diff --git a/contrib/tools/cython/Cython/Compiler/Interpreter.py b/contrib/tools/cython/Cython/Compiler/Interpreter.py
new file mode 100644
index 0000000000..9ec391f2a0
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Interpreter.py
@@ -0,0 +1,64 @@
+"""
+This module deals with interpreting the parse tree as Python
+would have done, in the compiler.
+
+For now this only covers parse tree to value conversion of
+compile-time values.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from .Nodes import *
+from .ExprNodes import *
+from .Errors import CompileError
+
+
+class EmptyScope(object):
+ def lookup(self, name):
+ return None
+
+empty_scope = EmptyScope()
+
+def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
+ """
+ Tries to interpret a list of compile time option nodes.
+    The result will be a tuple (optlist, optdict) in which all
+    expression nodes have been interpreted; each interpreted
+    entry has the form of a (value, pos) tuple.
+
+    optlist is a list of nodes, while optdict is a DictNode (the
+    resulting optdict is a plain dict).
+
+    If type_env is set, all type nodes will be analysed and the
+    resulting type set. Otherwise, only interpretable ExprNodes
+    are allowed; other nodes raise errors.
+
+ A CompileError will be raised if there are problems.
+ """
+
+ def interpret(node, ix):
+ if ix in type_args:
+ if type_env:
+ type = node.analyse_as_type(type_env)
+ if not type:
+ raise CompileError(node.pos, "Invalid type.")
+ return (type, node.pos)
+ else:
+ raise CompileError(node.pos, "Type not allowed here.")
+ else:
+            if (sys.version_info[0] >= 3 and
+ isinstance(node, StringNode) and
+ node.unicode_value is not None):
+ return (node.unicode_value, node.pos)
+ return (node.compile_time_value(empty_scope), node.pos)
+
+ if optlist:
+ optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
+ if optdict:
+ assert isinstance(optdict, DictNode)
+ new_optdict = {}
+ for item in optdict.key_value_pairs:
+ new_key, dummy = interpret(item.key, None)
+ new_optdict[new_key] = interpret(item.value, item.key.value)
+ optdict = new_optdict
+    return (optlist, optdict)
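+
+# Usage sketch (hypothetical argument nodes): for a directive call such as
+# @cython.boundscheck(False), one would call
+#
+#   optlist, optdict = interpret_compiletime_options(
+#       arg_nodes, dict_node, type_env=None, type_args=())
+#
+# and get back (value, pos) tuples for every interpreted argument.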
diff --git a/contrib/tools/cython/Cython/Compiler/Lexicon.py b/contrib/tools/cython/Cython/Compiler/Lexicon.py
new file mode 100644
index 0000000000..72c9ceaefd
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Lexicon.py
@@ -0,0 +1,138 @@
+# cython: language_level=3, py2_import=True
+#
+# Cython Scanner - Lexical Definitions
+#
+
+from __future__ import absolute_import, unicode_literals
+
+raw_prefixes = "rR"
+bytes_prefixes = "bB"
+string_prefixes = "fFuU" + bytes_prefixes
+char_prefixes = "cC"
+any_string_prefix = raw_prefixes + string_prefixes + char_prefixes
+IDENT = 'IDENT'
+
+
+def make_lexicon():
+ from ..Plex import \
+ Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
+ TEXT, IGNORE, State, Lexicon
+ from .Scanning import Method
+
+ letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
+ digit = Any("0123456789")
+ bindigit = Any("01")
+ octdigit = Any("01234567")
+ hexdigit = Any("0123456789ABCDEFabcdef")
+ indentation = Bol + Rep(Any(" \t"))
+
+ def underscore_digits(d):
+ return Rep1(d) + Rep(Str("_") + Rep1(d))
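+    # For example (illustrative): underscore_digits(digit) accepts "100",
+    # "1_000" and "1_0_0", but rejects "_1", "1_" and "1__0", since every
+    # underscore must sit between digits.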
+
+ decimal = underscore_digits(digit)
+ dot = Str(".")
+ exponent = Any("Ee") + Opt(Any("+-")) + decimal
+ decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
+
+ name = letter + Rep(letter | digit)
+ intconst = decimal | (Str("0") + ((Any("Xx") + underscore_digits(hexdigit)) |
+ (Any("Oo") + underscore_digits(octdigit)) |
+ (Any("Bb") + underscore_digits(bindigit)) ))
+ intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu")))
+ intliteral = intconst + intsuffix
+ fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
+ imagconst = (intconst | fltconst) + Any("jJ")
+
+ # invalid combinations of prefixes are caught in p_string_literal
+ beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) |
+ Any(char_prefixes)
+ ) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
+ two_oct = octdigit + octdigit
+ three_oct = octdigit + octdigit + octdigit
+ two_hex = hexdigit + hexdigit
+ four_hex = two_hex + two_hex
+ escapeseq = Str("\\") + (two_oct | three_oct |
+ Str('N{') + Rep(AnyBut('}')) + Str('}') |
+ Str('u') + four_hex | Str('x') + two_hex |
+ Str('U') + four_hex + four_hex | AnyChar)
+
+ bra = Any("([{")
+ ket = Any(")]}")
+ punct = Any(":,;+-*/|&<>=.%`~^?!@")
+ diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
+ "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
+ "<<=", ">>=", "**=", "//=", "->", "@=")
+ spaces = Rep1(Any(" \t\f"))
+ escaped_newline = Str("\\\n")
+ lineterm = Eol + Opt(Str("\n"))
+
+ comment = Str("#") + Rep(AnyBut("\n"))
+
+ return Lexicon([
+ (name, IDENT),
+ (intliteral, Method('strip_underscores', symbol='INT')),
+ (fltconst, Method('strip_underscores', symbol='FLOAT')),
+ (imagconst, Method('strip_underscores', symbol='IMAG')),
+ (punct | diphthong, TEXT),
+
+ (bra, Method('open_bracket_action')),
+ (ket, Method('close_bracket_action')),
+ (lineterm, Method('newline_action')),
+
+ (beginstring, Method('begin_string_action')),
+
+ (comment, IGNORE),
+ (spaces, IGNORE),
+ (escaped_newline, IGNORE),
+
+ State('INDENT', [
+ (comment + lineterm, Method('commentline')),
+ (Opt(spaces) + Opt(comment) + lineterm, IGNORE),
+ (indentation, Method('indentation_action')),
+ (Eof, Method('eof_action'))
+ ]),
+
+ State('SQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
+ (Str('"'), 'CHARS'),
+ (Str("\n"), Method('unclosed_string_action')),
+ (Str("'"), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('DQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut('"\n\\')), 'CHARS'),
+ (Str("'"), 'CHARS'),
+ (Str("\n"), Method('unclosed_string_action')),
+ (Str('"'), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('TSQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
+ (Any("'\""), 'CHARS'),
+ (Str("\n"), 'NEWLINE'),
+ (Str("'''"), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('TDQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut('"\'\n\\')), 'CHARS'),
+ (Any("'\""), 'CHARS'),
+ (Str("\n"), 'NEWLINE'),
+ (Str('"""'), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ (Eof, Method('eof_action'))
+ ],
+
+ # FIXME: Plex 1.9 needs different args here from Plex 1.1.4
+ #debug_flags = scanner_debug_flags,
+ #debug_file = scanner_dump_file
+ )
+
diff --git a/contrib/tools/cython/Cython/Compiler/Main.py b/contrib/tools/cython/Cython/Compiler/Main.py
new file mode 100644
index 0000000000..3f03b66ac9
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Main.py
@@ -0,0 +1,920 @@
+#
+# Cython Top Level
+#
+
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+import io
+
+if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3):
+ sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
+ sys.exit(1)
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ basestring = str
+
+# Do not import Parsing here, import it when needed, because Parsing imports
+# Nodes, which globally needs debug command line options initialized to set a
+# conditional metaclass. These options are processed by CmdLine called from
+# main() in this file.
+# import Parsing
+from . import Errors
+from .StringEncoding import EncodedString
+from .Scanning import PyrexScanner, FileSourceDescriptor
+from .Errors import PyrexError, CompileError, error, warning
+from .Symtab import ModuleScope
+from .. import Utils
+from . import Options
+
+from . import Version # legacy import needed by old PyTables versions
+version = Version.version # legacy attribute - use "Cython.__version__" instead
+
+module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
+
+verbose = 0
+
+standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.path.pardir, 'Includes'))
+
+class CompilationData(object):
+ # Bundles the information that is passed from transform to transform.
+ # (For now, this is only)
+
+ # While Context contains every pxd ever loaded, path information etc.,
+ # this only contains the data related to a single compilation pass
+ #
+ # pyx ModuleNode Main code tree of this compilation.
+ # pxds {string : ModuleNode} Trees for the pxds used in the pyx.
+ # codewriter CCodeWriter Where to output final code.
+ # options CompilationOptions
+ # result CompilationResult
+ pass
+
+
+class Context(object):
+ # This class encapsulates the context needed for compiling
+ # one or more Cython implementation files along with their
+ # associated and imported declaration files. It includes
+ # the root of the module import namespace and the list
+ # of directories to search for include files.
+ #
+ # modules {string : ModuleScope}
+ # include_directories [string]
+ # future_directives [object]
+ # language_level int currently 2 or 3 for Python 2/3
+
+ cython_scope = None
+ language_level = None # warn when not set but default to Py2
+
+ def __init__(self, include_directories, compiler_directives, cpp=False,
+ language_level=None, options=None):
+ # cython_scope is a hack, set to False by subclasses, in order to break
+ # an infinite loop.
+ # Better code organization would fix it.
+
+ from . import Builtin, CythonScope
+ self.modules = {"__builtin__" : Builtin.builtin_scope}
+ self.cython_scope = CythonScope.create_cython_scope(self)
+ self.modules["cython"] = self.cython_scope
+ self.include_directories = include_directories
+ self.future_directives = set()
+ self.compiler_directives = compiler_directives
+ self.cpp = cpp
+ self.options = options
+
+ self.pxds = {} # full name -> node tree
+ self._interned = {} # (type(value), value, *key_args) -> interned_value
+
+ if language_level is not None:
+ self.set_language_level(language_level)
+
+ self.gdb_debug_outputwriter = None
+
+ def set_language_level(self, level):
+ from .Future import print_function, unicode_literals, absolute_import, division
+ future_directives = set()
+ if level == '3str':
+ level = 3
+ else:
+ level = int(level)
+ if level >= 3:
+ future_directives.add(unicode_literals)
+ if level >= 3:
+ future_directives.update([print_function, absolute_import, division])
+ self.language_level = level
+ self.future_directives = future_directives
+ if level >= 3:
+ self.modules['builtins'] = self.modules['__builtin__']
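+        # Illustrative summary: level 2 enables no future directives;
+        # level 3 enables unicode_literals, print_function, absolute_import
+        # and division; '3str' enables the same set minus unicode_literals.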
+
+ def intern_ustring(self, value, encoding=None):
+ key = (EncodedString, value, encoding)
+ try:
+ return self._interned[key]
+ except KeyError:
+ pass
+ value = EncodedString(value)
+ if encoding:
+ value.encoding = encoding
+ self._interned[key] = value
+ return value
+
+ def intern_value(self, value, *key):
+ key = (type(value), value) + key
+ try:
+ return self._interned[key]
+ except KeyError:
+ pass
+ self._interned[key] = value
+ return value
+
+ # pipeline creation functions can now be found in Pipeline.py
+
+ def process_pxd(self, source_desc, scope, module_name):
+ from . import Pipeline
+ if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
+ source = CompilationSource(source_desc, module_name, os.getcwd())
+ result_sink = create_default_resultobj(source, self.options)
+ pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
+ result = Pipeline.run_pipeline(pipeline, source)
+ else:
+ pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
+ result = Pipeline.run_pipeline(pipeline, source_desc)
+ return result
+
+ def nonfatal_error(self, exc):
+ return Errors.report_error(exc)
+
+ def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,
+ absolute_fallback=True):
+ # Finds and returns the module scope corresponding to
+ # the given relative or absolute module name. If this
+ # is the first time the module has been requested, finds
+ # the corresponding .pxd file and process it.
+ # If relative_to is not None, it must be a module scope,
+ # and the module will first be searched for relative to
+ # that module, provided its name is not a dotted name.
+ debug_find_module = 0
+ if debug_find_module:
+ print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
+ module_name, relative_to, pos, need_pxd))
+
+ scope = None
+ pxd_pathname = None
+ if relative_to:
+ if module_name:
+ # from .module import ...
+ qualified_name = relative_to.qualify_name(module_name)
+ else:
+ # from . import ...
+ qualified_name = relative_to.qualified_name
+ scope = relative_to
+ relative_to = None
+ else:
+ qualified_name = module_name
+
+ if not module_name_pattern.match(qualified_name):
+ raise CompileError(pos or (module_name, 0, 0),
+ "'%s' is not a valid module name" % module_name)
+
+ if relative_to:
+ if debug_find_module:
+ print("...trying relative import")
+ scope = relative_to.lookup_submodule(module_name)
+ if not scope:
+ pxd_pathname = self.find_pxd_file(qualified_name, pos)
+ if pxd_pathname:
+ scope = relative_to.find_submodule(module_name)
+ if not scope:
+ if debug_find_module:
+ print("...trying absolute import")
+ if absolute_fallback:
+ qualified_name = module_name
+ scope = self
+ for name in qualified_name.split("."):
+ scope = scope.find_submodule(name)
+
+ if debug_find_module:
+ print("...scope = %s" % scope)
+ if not scope.pxd_file_loaded:
+ if debug_find_module:
+ print("...pxd not loaded")
+ if not pxd_pathname:
+ if debug_find_module:
+ print("...looking for pxd file")
+ pxd_pathname = self.find_pxd_file(qualified_name, pos)
+ if debug_find_module:
+ print("......found %s" % pxd_pathname)
+ if not pxd_pathname and need_pxd:
+ # Set pxd_file_loaded such that we don't need to
+ # look for the non-existing pxd file next time.
+ scope.pxd_file_loaded = True
+ package_pathname = self.search_include_directories(qualified_name, ".py", pos)
+ if package_pathname and package_pathname.endswith('__init__.py'):
+ pass
+ else:
+ error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
+ if pxd_pathname:
+ scope.pxd_file_loaded = True
+ try:
+ if debug_find_module:
+ print("Context.find_module: Parsing %s" % pxd_pathname)
+ rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
+ if not pxd_pathname.endswith(rel_path):
+ rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
+ if Options.source_root:
+ rel_path = os.path.relpath(pxd_pathname, Options.source_root)
+ source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
+ err, result = self.process_pxd(source_desc, scope, qualified_name)
+ if err:
+ raise err
+ (pxd_codenodes, pxd_scope) = result
+ self.pxds[module_name] = (pxd_codenodes, pxd_scope)
+ except CompileError:
+ pass
+ return scope
+
+ def find_pxd_file(self, qualified_name, pos, sys_path=False):
+ # Search include path (and sys.path if sys_path is True) for
+ # the .pxd file corresponding to the given fully-qualified
+ # module name.
+ # Will find either a dotted filename or a file in a
+ # package directory. If a source file position is given,
+ # the directory containing the source file is searched first
+ # for a dotted filename, and its containing package root
+ # directory is searched first for a non-dotted filename.
+ pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
+ if pxd is None: # XXX Keep this until Includes/Deprecated is removed
+ if (qualified_name.startswith('python') or
+ qualified_name in ('stdlib', 'stdio', 'stl')):
+ standard_include_path = os.path.abspath(os.path.normpath(
+ os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
+ deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
+ self.include_directories.append(deprecated_include_path)
+ try:
+ pxd = self.search_include_directories(qualified_name, ".pxd", pos)
+ finally:
+ self.include_directories.pop()
+ if pxd:
+ name = qualified_name
+ if name.startswith('python'):
+ warning(pos, "'%s' is deprecated, use 'cpython'" % name, 1)
+ elif name in ('stdlib', 'stdio'):
+ warning(pos, "'%s' is deprecated, use 'libc.%s'" % (name, name), 1)
+                elif name == 'stl':
+ warning(pos, "'%s' is deprecated, use 'libcpp.*.*'" % name, 1)
+ if pxd is None and Options.cimport_from_pyx:
+ return self.find_pyx_file(qualified_name, pos)
+ return pxd
+
+ def find_pyx_file(self, qualified_name, pos):
+ # Search include path for the .pyx file corresponding to the
+ # given fully-qualified module name, as for find_pxd_file().
+ return self.search_include_directories(qualified_name, ".pyx", pos)
+
+ def find_include_file(self, filename, pos):
+ # Search list of include directories for filename.
+ # Reports an error and returns None if not found.
+ path = self.search_include_directories(filename, "", pos,
+ include=True)
+ if not path:
+ error(pos, "'%s' not found" % filename)
+ return path
+
+ def search_include_directories(self, qualified_name, suffix, pos,
+ include=False, sys_path=False):
+ include_dirs = self.include_directories
+ if sys_path:
+ include_dirs = include_dirs + sys.path
+ # include_dirs must be hashable for caching in @cached_function
+ include_dirs = tuple(include_dirs + [standard_include_path])
+ return search_include_directories(include_dirs, qualified_name,
+ suffix, pos, include)
+
+ def find_root_package_dir(self, file_path):
+ return Utils.find_root_package_dir(file_path)
+
+ def check_package_dir(self, dir, package_names):
+ return Utils.check_package_dir(dir, tuple(package_names))
+
+ def c_file_out_of_date(self, source_path, output_path):
+ if not os.path.exists(output_path):
+ return 1
+ c_time = Utils.modification_time(output_path)
+ if Utils.file_newer_than(source_path, c_time):
+ return 1
+ pos = [source_path]
+ pxd_path = Utils.replace_suffix(source_path, ".pxd")
+ if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
+ return 1
+ for kind, name in self.read_dependency_file(source_path):
+ if kind == "cimport":
+ dep_path = self.find_pxd_file(name, pos)
+ elif kind == "include":
+                dep_path = self.search_include_directories(name, "", pos, include=True)
+ else:
+ continue
+ if dep_path and Utils.file_newer_than(dep_path, c_time):
+ return 1
+ return 0
+
+ def find_cimported_module_names(self, source_path):
+ return [ name for kind, name in self.read_dependency_file(source_path)
+ if kind == "cimport" ]
+
+ def is_package_dir(self, dir_path):
+ return Utils.is_package_dir(dir_path)
+
+ def read_dependency_file(self, source_path):
+ dep_path = Utils.replace_suffix(source_path, ".dep")
+ if os.path.exists(dep_path):
+ f = open(dep_path, "rU")
+ chunks = [ line.strip().split(" ", 1)
+ for line in f.readlines()
+ if " " in line.strip() ]
+ f.close()
+ return chunks
+ else:
+ return ()
+
+ def lookup_submodule(self, name):
+ # Look up a top-level module. Returns None if not found.
+ return self.modules.get(name, None)
+
+ def find_submodule(self, name):
+ # Find a top-level module, creating a new one if needed.
+ scope = self.lookup_submodule(name)
+ if not scope:
+ scope = ModuleScope(name,
+ parent_module = None, context = self)
+ self.modules[name] = scope
+ return scope
+
+ def parse(self, source_desc, scope, pxd, full_module_name):
+ if not isinstance(source_desc, FileSourceDescriptor):
+ raise RuntimeError("Only file sources for code supported")
+ source_filename = source_desc.filename
+ scope.cpp = self.cpp
+ # Parse the given source file and return a parse tree.
+ num_errors = Errors.num_errors
+ try:
+ with Utils.open_source_file(source_filename) as f:
+ from . import Parsing
+ s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
+ scope = scope, context = self)
+ tree = Parsing.p_module(s, pxd, full_module_name)
+ if self.options.formal_grammar:
+ try:
+ from ..Parser import ConcreteSyntaxTree
+ except ImportError:
+ raise RuntimeError(
+ "Formal grammar can only be used with compiled Cython with an available pgen.")
+ ConcreteSyntaxTree.p_module(source_filename)
+ except UnicodeDecodeError as e:
+ #import traceback
+ #traceback.print_exc()
+ raise self._report_decode_error(source_desc, e)
+
+ if Errors.num_errors > num_errors:
+ raise CompileError()
+ return tree
+
+ def _report_decode_error(self, source_desc, exc):
+ msg = exc.args[-1]
+ position = exc.args[2]
+ encoding = exc.args[0]
+
+ line = 1
+ column = idx = 0
+ with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
+ for line, data in enumerate(f, 1):
+ idx += len(data)
+ if idx >= position:
+ column = position - (idx - len(data)) + 1
+ break
+
+ return error((source_desc, line, column),
+ "Decoding error, missing or incorrect coding=<encoding-name> "
+ "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
+
+ def extract_module_name(self, path, options):
+ # Find fully_qualified module name from the full pathname
+ # of a source file.
+ dir, filename = os.path.split(path)
+ module_name, _ = os.path.splitext(filename)
+ if "." in module_name:
+ return module_name
+ names = [module_name]
+ while self.is_package_dir(dir):
+ parent, package_name = os.path.split(dir)
+ if parent == dir:
+ break
+ names.append(package_name)
+ dir = parent
+ names.reverse()
+ return ".".join(names)
+
+ def setup_errors(self, options, result):
+ Errors.reset() # clear any remaining error state
+ if options.use_listing_file:
+ path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis")
+ else:
+ path = None
+ Errors.open_listing_file(path=path,
+ echo_to_stderr=options.errors_to_stderr)
+
+ def teardown_errors(self, err, options, result):
+ source_desc = result.compilation_source.source_desc
+ if not isinstance(source_desc, FileSourceDescriptor):
+ raise RuntimeError("Only file sources for code supported")
+ Errors.close_listing_file()
+ result.num_errors = Errors.num_errors
+ if result.num_errors > 0:
+ err = True
+ if err and result.c_file:
+ try:
+ Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
+ except EnvironmentError:
+ pass
+ result.c_file = None
+
+
+def get_output_filename(source_filename, cwd, options):
+ if options.cplus:
+ c_suffix = ".cpp"
+ else:
+ c_suffix = ".c"
+ suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
+ if options.output_file:
+ out_path = os.path.join(cwd, options.output_file)
+ if os.path.isdir(out_path):
+ return os.path.join(out_path, os.path.basename(suggested_file_name))
+ else:
+ return out_path
+ else:
+ return suggested_file_name
+
+
+def create_default_resultobj(compilation_source, options):
+ result = CompilationResult()
+ result.main_source_file = compilation_source.source_desc.filename
+ result.compilation_source = compilation_source
+ source_desc = compilation_source.source_desc
+ result.c_file = get_output_filename(source_desc.filename,
+ compilation_source.cwd, options)
+ result.embedded_metadata = options.embedded_metadata
+ return result
+
+
+def run_pipeline(source, options, full_module_name=None, context=None):
+ from . import Pipeline
+
+ source_ext = os.path.splitext(source)[1]
+ options.configure_language_defaults(source_ext[1:]) # py/pyx
+ if context is None:
+ context = options.create_context()
+
+ # Set up source object
+ cwd = os.getcwd()
+ abs_path = os.path.abspath(source)
+ full_module_name = full_module_name or options.module_name or context.extract_module_name(source, options)
+
+ Utils.raise_error_if_module_name_forbidden(full_module_name)
+
+ if options.relative_path_in_code_position_comments:
+ rel_path = full_module_name.replace('.', os.sep) + source_ext
+ if not abs_path.endswith(rel_path):
+ rel_path = source # safety measure to prevent printing incorrect paths
+ else:
+ rel_path = abs_path
+ if Options.source_root:
+ rel_path = os.path.relpath(abs_path, Options.source_root)
+ source_desc = FileSourceDescriptor(abs_path, rel_path)
+ source = CompilationSource(source_desc, full_module_name, cwd)
+
+ # Set up result object
+ result = create_default_resultobj(source, options)
+
+ if options.annotate is None:
+ # By default, decide based on whether an html file already exists.
+ html_filename = os.path.splitext(result.c_file)[0] + ".html"
+ if os.path.exists(html_filename):
+ with io.open(html_filename, "r", encoding="UTF-8") as html_file:
+ if u'<!-- Generated by Cython' in html_file.read(100):
+ options.annotate = True
+
+ # Get pipeline
+ if source_ext.lower() == '.py' or not source_ext:
+ pipeline = Pipeline.create_py_pipeline(context, options, result)
+ else:
+ pipeline = Pipeline.create_pyx_pipeline(context, options, result)
+
+ context.setup_errors(options, result)
+ err, enddata = Pipeline.run_pipeline(pipeline, source)
+ context.teardown_errors(err, options, result)
+ if err is None and options.depfile:
+ from ..Build.Dependencies import create_dependency_tree
+ dependencies = create_dependency_tree(context).all_dependencies(result.main_source_file)
+ Utils.write_depfile(result.c_file, result.main_source_file, dependencies)
+ return result
+
+
+# ------------------------------------------------------------------------
+#
+# Main Python entry points
+#
+# ------------------------------------------------------------------------
+
+class CompilationSource(object):
+ """
+ Contains the data necessary to start up a compilation pipeline for
+ a single compilation unit.
+ """
+ def __init__(self, source_desc, full_module_name, cwd):
+ self.source_desc = source_desc
+ self.full_module_name = full_module_name
+ self.cwd = cwd
+
+
+class CompilationOptions(object):
+ r"""
+ See default_options at the end of this module for a list of all possible
+ options and CmdLine.usage and CmdLine.parse_command_line() for their
+ meaning.
+ """
+ def __init__(self, defaults=None, **kw):
+ self.include_path = []
+ if defaults:
+ if isinstance(defaults, CompilationOptions):
+ defaults = defaults.__dict__
+ else:
+ defaults = default_options
+
+ options = dict(defaults)
+ options.update(kw)
+
+ # let's assume 'default_options' contains a value for most known compiler options
+ # and validate against them
+ unknown_options = set(options) - set(default_options)
+ # ignore valid options that are not in the defaults
+ unknown_options.difference_update(['include_path'])
+ if unknown_options:
+ message = "got unknown compilation option%s, please remove: %s" % (
+ 's' if len(unknown_options) > 1 else '',
+ ', '.join(unknown_options))
+ raise ValueError(message)
+
+ directive_defaults = Options.get_directive_defaults()
+ directives = dict(options['compiler_directives']) # copy mutable field
+ # check for invalid directives
+ unknown_directives = set(directives) - set(directive_defaults)
+ if unknown_directives:
+ message = "got unknown compiler directive%s: %s" % (
+ 's' if len(unknown_directives) > 1 else '',
+ ', '.join(unknown_directives))
+ raise ValueError(message)
+ options['compiler_directives'] = directives
+ if directives.get('np_pythran', False) and not options['cplus']:
+ import warnings
+ warnings.warn("C++ mode forced when in Pythran mode!")
+ options['cplus'] = True
+ if 'language_level' in directives and 'language_level' not in kw:
+ options['language_level'] = directives['language_level']
+ elif not options.get('language_level'):
+ options['language_level'] = directive_defaults.get('language_level')
+ if 'formal_grammar' in directives and 'formal_grammar' not in kw:
+ options['formal_grammar'] = directives['formal_grammar']
+ if options['cache'] is True:
+ options['cache'] = os.path.join(Utils.get_cython_cache_dir(), 'compiler')
+
+ self.__dict__.update(options)
+
+ def configure_language_defaults(self, source_extension):
+ if source_extension == 'py':
+ if self.compiler_directives.get('binding') is None:
+ self.compiler_directives['binding'] = True
+
+ def create_context(self):
+ return Context(self.include_path, self.compiler_directives,
+ self.cplus, self.language_level, options=self)
+
+ def get_fingerprint(self):
+ r"""
+ Return a string that contains all the options that are relevant for cache invalidation.
+ """
+ # Collect only the data that can affect the generated file(s).
+ data = {}
+
+ for key, value in self.__dict__.items():
+ if key in ['show_version', 'errors_to_stderr', 'verbose', 'quiet']:
+ # verbosity flags have no influence on the compilation result
+ continue
+ elif key in ['output_file', 'output_dir']:
+ # ignore the exact name of the output file
+ continue
+ elif key in ['timestamps']:
+ # the cache cares about the content of files, not about the timestamps of sources
+ continue
+ elif key in ['cache']:
+ # hopefully caching has no influence on the compilation result
+ continue
+ elif key in ['compiler_directives']:
+ # directives passed on to the C compiler do not influence the generated C code
+ continue
+ elif key in ['include_path']:
+ # this path changes which headers are tracked as dependencies,
+ # it has no influence on the generated C code
+ continue
+ elif key in ['working_path']:
+ # this path changes where modules and pxd files are found;
+ # their content is part of the fingerprint anyway, their
+ # absolute path does not matter
+ continue
+ elif key in ['create_extension']:
+ # create_extension() has already mangled the options, e.g.,
+ # embedded_metadata, when the fingerprint is computed so we
+ # ignore it here.
+ continue
+ elif key in ['build_dir']:
+ # the (temporary) directory where we collect dependencies
+ # has no influence on the C output
+ continue
+ elif key in ['use_listing_file', 'generate_pxi', 'annotate', 'annotate_coverage_xml']:
+ # all output files are contained in the cache so the types of
+ # files generated must be part of the fingerprint
+ data[key] = value
+ elif key in ['formal_grammar', 'evaluate_tree_assertions']:
+ # these bits can change whether compilation to C passes/fails
+ data[key] = value
+ elif key in ['embedded_metadata', 'emit_linenums', 'c_line_in_traceback', 'gdb_debug', 'relative_path_in_code_position_comments']:
+ # the generated code contains additional bits when these are set
+ data[key] = value
+ elif key in ['cplus', 'language_level', 'compile_time_env', 'np_pythran']:
+ # assorted bits that, e.g., influence the parser
+ data[key] = value
+            elif key in ['capi_reexport_cincludes']:
+ if self.capi_reexport_cincludes:
+ # our caching implementation does not yet include fingerprints of all the header files
+ raise NotImplementedError('capi_reexport_cincludes is not compatible with Cython caching')
+            elif key in ['common_utility_include_dir']:
+ if self.common_utility_include_dir:
+ raise NotImplementedError('common_utility_include_dir is not compatible with Cython caching yet')
+ else:
+ # any unexpected option should go into the fingerprint; it's better
+ # to recompile than to return incorrect results from the cache.
+ data[key] = value
+
+ def to_fingerprint(item):
+ r"""
+ Recursively turn item into a string, turning dicts into lists with
+ deterministic ordering.
+ """
+ if isinstance(item, dict):
+ item = sorted([(repr(key), to_fingerprint(value)) for key, value in item.items()])
+ return repr(item)
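+
+        # Illustration (hypothetical values): equal dicts fingerprint
+        # identically regardless of key insertion order, e.g.
+        #   to_fingerprint({'a': 1, 'b': 2}) == to_fingerprint({'b': 2, 'a': 1})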
+
+ return to_fingerprint(data)
+
+
+class CompilationResult(object):
+ """
+ Results from the Cython compiler:
+
+ c_file string or None The generated C source file
+ h_file string or None The generated C header file
+ i_file string or None The generated .pxi file
+ api_file string or None The generated C API .h file
+ listing_file string or None File of error messages
+ object_file string or None Result of compiling the C file
+ extension_file string or None Result of linking the object file
+ num_errors integer Number of compilation errors
+ compilation_source CompilationSource
+ """
+
+ def __init__(self):
+ self.c_file = None
+ self.h_file = None
+ self.i_file = None
+ self.api_file = None
+ self.listing_file = None
+ self.object_file = None
+ self.extension_file = None
+ self.main_source_file = None
+
+
+class CompilationResultSet(dict):
+ """
+ Results from compiling multiple Pyrex source files. A mapping
+ from source file paths to CompilationResult instances. Also
+ has the following attributes:
+
+ num_errors integer Total number of compilation errors
+ """
+
+ num_errors = 0
+
+ def add(self, source, result):
+ self[source] = result
+ self.num_errors += result.num_errors
+
+
+def compile_single(source, options, full_module_name = None):
+ """
+ compile_single(source, options, full_module_name)
+
+ Compile the given Pyrex implementation file and return a CompilationResult.
+ Always compiles a single file; does not perform timestamp checking or
+ recursion.
+ """
+ return run_pipeline(source, options, full_module_name)
+
+
+def compile_multiple(sources, options):
+ """
+ compile_multiple(sources, options)
+
+ Compiles the given sequence of Pyrex implementation files and returns
+ a CompilationResultSet. Performs timestamp checking and/or recursion
+ if these are specified in the options.
+ """
+ if options.module_name and len(sources) > 1:
+ raise RuntimeError('Full module name can only be set '
+ 'for single source compilation')
+ # run_pipeline creates the context
+ # context = options.create_context()
+ sources = [os.path.abspath(source) for source in sources]
+ processed = set()
+ results = CompilationResultSet()
+ timestamps = options.timestamps
+ verbose = options.verbose
+ context = None
+ cwd = os.getcwd()
+ for source in sources:
+ if source not in processed:
+ if context is None:
+ context = options.create_context()
+ output_filename = get_output_filename(source, cwd, options)
+ out_of_date = context.c_file_out_of_date(source, output_filename)
+ if (not timestamps) or out_of_date:
+ if verbose:
+ sys.stderr.write("Compiling %s\n" % source)
+ result = run_pipeline(source, options,
+ full_module_name=options.module_name,
+ context=context)
+ results.add(source, result)
+ # Compiling multiple sources in one context doesn't quite
+ # work properly yet.
+ context = None
+ processed.add(source)
+ return results
+
+
+def compile(source, options = None, full_module_name = None, **kwds):
+ """
+ compile(source [, options], [, <option> = <value>]...)
+
+ Compile one or more Pyrex implementation files, with optional timestamp
+ checking and recursing on dependencies. The source argument may be a string
+ or a sequence of strings. If it is a string and no recursion or timestamp
+ checking is requested, a CompilationResult is returned, otherwise a
+ CompilationResultSet is returned.
+ """
+ options = CompilationOptions(defaults = options, **kwds)
+ if isinstance(source, basestring) and not options.timestamps:
+ return compile_single(source, options, full_module_name)
+ else:
+ return compile_multiple(source, options)
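+
+# Usage sketch (hypothetical file name and options):
+#
+#   from Cython.Compiler.Main import compile
+#   result = compile("example.pyx", cplus=0, language_level=3)
+#   print(result.c_file, result.num_errors)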
+
+
+@Utils.cached_function
+def search_include_directories(dirs, qualified_name, suffix, pos, include=False):
+ """
+ Search the list of include directories for the given file name.
+
+ If a source file position is given, first searches the directory
+ containing that file. Returns None if not found, but does not
+ report an error.
+
+ The 'include' option will disable package dereferencing.
+ """
+
+ if pos:
+ file_desc = pos[0]
+ if not isinstance(file_desc, FileSourceDescriptor):
+ raise RuntimeError("Only file sources for code supported")
+ if include:
+ dirs = (os.path.dirname(file_desc.filename),) + dirs
+ else:
+ dirs = (Utils.find_root_package_dir(file_desc.filename),) + dirs
+
+ dotted_filename = qualified_name
+ if suffix:
+ dotted_filename += suffix
+
+ if not include:
+ names = qualified_name.split('.')
+ package_names = tuple(names[:-1])
+ module_name = names[-1]
+ module_filename = module_name + suffix
+ package_filename = "__init__" + suffix
+
+ for dirname in dirs:
+ path = os.path.join(dirname, dotted_filename)
+ if os.path.exists(path):
+ return path
+
+ # Arcadia-specific lookup: search for packages in include paths,
+        # ignoring the existence of __init__.py files as package markers
+        # (they are not required by the Arcadia build system)
+ if not include:
+ package_dir = os.path.join(dirname, *package_names)
+ path = os.path.join(package_dir, module_filename)
+ if os.path.exists(path):
+ return path
+            path = os.path.join(package_dir, module_name, package_filename)
+ if os.path.exists(path):
+ return path
+
+ return None
+
+
+# ------------------------------------------------------------------------
+#
+# Main command-line entry point
+#
+# ------------------------------------------------------------------------
+
+def setuptools_main():
+ return main(command_line = 1)
+
+
+def main(command_line = 0):
+ args = sys.argv[1:]
+ any_failures = 0
+ if command_line:
+ from .CmdLine import parse_command_line
+ options, sources = parse_command_line(args)
+ else:
+ options = CompilationOptions(default_options)
+ sources = args
+
+ if options.show_version:
+ sys.stderr.write("Cython version %s\n" % version)
+ if options.working_path!="":
+ os.chdir(options.working_path)
+ try:
+ result = compile(sources, options)
+ if result.num_errors > 0:
+ any_failures = 1
+ except (EnvironmentError, PyrexError) as e:
+ sys.stderr.write(str(e) + '\n')
+ any_failures = 1
+ if any_failures:
+ sys.exit(1)
+
+
+# ------------------------------------------------------------------------
+#
+# Set the default options depending on the platform
+#
+# ------------------------------------------------------------------------
+
+default_options = dict(
+ show_version = 0,
+ use_listing_file = 0,
+ errors_to_stderr = 1,
+ cplus = 0,
+ output_file = None,
+ depfile = None,
+ annotate = None,
+ annotate_coverage_xml = None,
+ generate_pxi = 0,
+ capi_reexport_cincludes = 0,
+ working_path = "",
+ timestamps = None,
+ verbose = 0,
+ quiet = 0,
+ compiler_directives = {},
+ embedded_metadata = {},
+ evaluate_tree_assertions = False,
+ emit_linenums = False,
+ relative_path_in_code_position_comments = True,
+ c_line_in_traceback = True,
+ language_level = None, # warn but default to 2
+ formal_grammar = False,
+ gdb_debug = False,
+ init_suffix = None,
+ compile_time_env = None,
+ common_utility_include_dir = None,
+ output_dir=None,
+ build_dir=None,
+ cache=None,
+ create_extension=None,
+ module_name=None,
+ np_pythran=False
+)
diff --git a/contrib/tools/cython/Cython/Compiler/MemoryView.py b/contrib/tools/cython/Cython/Compiler/MemoryView.py
new file mode 100644
index 0000000000..0406d6c716
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/MemoryView.py
@@ -0,0 +1,858 @@
+from __future__ import absolute_import
+
+from .Errors import CompileError, error
+from . import ExprNodes
+from .ExprNodes import IntNode, NameNode, AttributeNode
+from . import Options
+from .Code import UtilityCode, TempitaUtilityCode
+from .UtilityCode import CythonUtilityCode
+from . import Buffer
+from . import PyrexTypes
+from . import ModuleNode
+
+START_ERR = "Start must not be given."
+STOP_ERR = "Axis specification only allowed in the 'step' slot."
+STEP_ERR = "Step must be omitted, 1, or a valid specifier."
+BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous."
+INVALID_ERR = "Invalid axis specification."
+NOT_CIMPORTED_ERR = "Variable was not cimported from cython.view"
+EXPR_ERR = "no expressions allowed in axis spec, only names and literals."
+CF_ERR = "Invalid axis specification for a C/Fortran contiguous array."
+ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the "
+ "GIL, consider using initializedcheck(False)")
+
+
+def concat_flags(*flags):
+ return "(%s)" % "|".join(flags)
+
+
+format_flag = "PyBUF_FORMAT"
+
+memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"
+memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"
+memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)"
+memview_full_access = "PyBUF_FULL_RO"
+#memview_strided_access = "PyBUF_STRIDED_RO"
+memview_strided_access = "PyBUF_RECORDS_RO"
+
+MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
+MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
+MEMVIEW_FULL = '__Pyx_MEMVIEW_FULL'
+MEMVIEW_CONTIG = '__Pyx_MEMVIEW_CONTIG'
+MEMVIEW_STRIDED = '__Pyx_MEMVIEW_STRIDED'
+MEMVIEW_FOLLOW = '__Pyx_MEMVIEW_FOLLOW'
+
+_spec_to_const = {
+ 'direct' : MEMVIEW_DIRECT,
+ 'ptr' : MEMVIEW_PTR,
+ 'full' : MEMVIEW_FULL,
+ 'contig' : MEMVIEW_CONTIG,
+ 'strided': MEMVIEW_STRIDED,
+ 'follow' : MEMVIEW_FOLLOW,
+ }
+
+_spec_to_abbrev = {
+ 'direct' : 'd',
+ 'ptr' : 'p',
+ 'full' : 'f',
+ 'contig' : 'c',
+ 'strided' : 's',
+ 'follow' : '_',
+}
+
+memslice_entry_init = "{ 0, 0, { 0 }, { 0 }, { 0 } }"
+
+memview_name = u'memoryview'
+memview_typeptr_cname = '__pyx_memoryview_type'
+memview_objstruct_cname = '__pyx_memoryview_obj'
+memviewslice_cname = u'__Pyx_memviewslice'
+
+
+def put_init_entry(mv_cname, code):
+ code.putln("%s.data = NULL;" % mv_cname)
+ code.putln("%s.memview = NULL;" % mv_cname)
+
+
+#def axes_to_str(axes):
+# return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
+
+
+def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
+ have_gil=False, first_assignment=True):
+ "We can avoid decreffing the lhs if we know it is the first assignment"
+ assert rhs.type.is_memoryviewslice
+
+ pretty_rhs = rhs.result_in_temp() or rhs.is_simple()
+ if pretty_rhs:
+ rhstmp = rhs.result()
+ else:
+ rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
+ code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type)))
+
+ # Allow uninitialized assignment
+ #code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry))
+ put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
+ have_gil=have_gil, first_assignment=first_assignment)
+
+ if not pretty_rhs:
+ code.funcstate.release_temp(rhstmp)
+
+
+def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code,
+ have_gil=False, first_assignment=False):
+ if not first_assignment:
+ code.put_xdecref_memoryviewslice(lhs_cname, have_gil=have_gil)
+
+ if not rhs.result_in_temp():
+ rhs.make_owned_memoryviewslice(code)
+
+ code.putln("%s = %s;" % (lhs_cname, rhs_cname))
+
+
+def get_buf_flags(specs):
+ is_c_contig, is_f_contig = is_cf_contig(specs)
+
+ if is_c_contig:
+ return memview_c_contiguous
+ elif is_f_contig:
+ return memview_f_contiguous
+
+ access, packing = zip(*specs)
+
+ if 'full' in access or 'ptr' in access:
+ return memview_full_access
+ else:
+ return memview_strided_access
+
+
+def insert_newaxes(memoryviewtype, n):
+ axes = [('direct', 'strided')] * n
+ axes.extend(memoryviewtype.axes)
+ return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, axes)
+
+
+def broadcast_types(src, dst):
+ n = abs(src.ndim - dst.ndim)
+ if src.ndim < dst.ndim:
+ return insert_newaxes(src, n), dst
+ else:
+ return src, insert_newaxes(dst, n)
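+
+# Illustration (hypothetical shapes): broadcasting a 1-D slice against a
+# 3-D slice prepends two ('direct', 'strided') axes to the 1-D type, so
+# both operands end up with the same ndim.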
+
+
+def valid_memslice_dtype(dtype, i=0):
+ """
+ Return whether type dtype can be used as the base type of a
+ memoryview slice.
+
+ We support structs, numeric types and objects
+ """
+ if dtype.is_complex and dtype.real_type.is_int:
+ return False
+
+ if dtype is PyrexTypes.c_bint_type:
+ return False
+
+ if dtype.is_struct and dtype.kind == 'struct':
+ for member in dtype.scope.var_entries:
+ if not valid_memslice_dtype(member.type):
+ return False
+
+ return True
+
+ return (
+ dtype.is_error or
+ # Pointers are not valid (yet)
+ # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
+ (dtype.is_array and i < 8 and
+ valid_memslice_dtype(dtype.base_type, i + 1)) or
+ dtype.is_numeric or
+ dtype.is_pyobject or
+ dtype.is_fused or # accept this as it will be replaced by specializations later
+ (dtype.is_typedef and valid_memslice_dtype(dtype.typedef_base_type))
+ )
+
+
+class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
+ """
+ May be used during code generation time to be queried for
+ shape/strides/suboffsets attributes, or to perform indexing or slicing.
+ """
+ def __init__(self, entry):
+ self.entry = entry
+ self.type = entry.type
+ self.cname = entry.cname
+
+ self.buf_ptr = "%s.data" % self.cname
+
+ dtype = self.entry.type.dtype
+ self.buf_ptr_type = PyrexTypes.CPtrType(dtype)
+ self.init_attributes()
+
+ def get_buf_suboffsetvars(self):
+ return self._for_all_ndim("%s.suboffsets[%d]")
+
+ def get_buf_stridevars(self):
+ return self._for_all_ndim("%s.strides[%d]")
+
+ def get_buf_shapevars(self):
+ return self._for_all_ndim("%s.shape[%d]")
+
+ def generate_buffer_lookup_code(self, code, index_cnames):
+ axes = [(dim, index_cnames[dim], access, packing)
+ for dim, (access, packing) in enumerate(self.type.axes)]
+ return self._generate_buffer_lookup_code(code, axes)
+
+ def _generate_buffer_lookup_code(self, code, axes, cast_result=True):
+ """
+ Generate a single expression that indexes the memory view slice
+ in each dimension.
+ """
+ bufp = self.buf_ptr
+ type_decl = self.type.dtype.empty_declaration_code()
+
+ for dim, index, access, packing in axes:
+ shape = "%s.shape[%d]" % (self.cname, dim)
+ stride = "%s.strides[%d]" % (self.cname, dim)
+ suboffset = "%s.suboffsets[%d]" % (self.cname, dim)
+
+ flag = get_memoryview_flag(access, packing)
+
+ if flag in ("generic", "generic_contiguous"):
+ # Note: we cannot do cast tricks to avoid stride multiplication
+ # for generic_contiguous, as we may have to do (dtype *)
+ # or (dtype **) arithmetic, we won't know which unless
+ # we check suboffsets
+ code.globalstate.use_utility_code(memviewslice_index_helpers)
+ bufp = ('__pyx_memviewslice_index_full(%s, %s, %s, %s)' %
+ (bufp, index, stride, suboffset))
+
+ elif flag == "indirect":
+ bufp = "(%s + %s * %s)" % (bufp, index, stride)
+ bufp = ("(*((char **) %s) + %s)" % (bufp, suboffset))
+
+ elif flag == "indirect_contiguous":
+ # Note: we do char ** arithmetic
+ bufp = "(*((char **) %s + %s) + %s)" % (bufp, index, suboffset)
+
+ elif flag == "strided":
+ bufp = "(%s + %s * %s)" % (bufp, index, stride)
+
+ else:
+ assert flag == 'contiguous', flag
+ bufp = '((char *) (((%s *) %s) + %s))' % (type_decl, bufp, index)
+
+ bufp = '( /* dim=%d */ %s )' % (dim, bufp)
+
+ if cast_result:
+ return "((%s *) %s)" % (type_decl, bufp)
+
+ return bufp
+
+ def generate_buffer_slice_code(self, code, indices, dst, have_gil,
+ have_slices, directives):
+ """
+ Slice a memoryviewslice.
+
+        indices - list of index nodes. If an index is not a SliceNode or
+                  NoneNode, it must be coercible to Py_ssize_t.
+
+ Simply call __pyx_memoryview_slice_memviewslice with the right
+ arguments, unless the dimension is omitted or a bare ':', in which
+ case we copy over the shape/strides/suboffsets attributes directly
+ for that dimension.
+ """
+ src = self.cname
+
+ code.putln("%(dst)s.data = %(src)s.data;" % locals())
+ code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
+ code.put_incref_memoryviewslice(dst)
+
+ all_dimensions_direct = all(access == 'direct' for access, packing in self.type.axes)
+ suboffset_dim_temp = []
+
+ def get_suboffset_dim():
+ # create the global temp variable on first request
+ if not suboffset_dim_temp:
+ suboffset_dim = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = -1;" % suboffset_dim)
+ suboffset_dim_temp.append(suboffset_dim)
+ return suboffset_dim_temp[0]
+
+ dim = -1
+ new_ndim = 0
+ for index in indices:
+ if index.is_none:
+ # newaxis
+ for attrib, value in [('shape', 1), ('strides', 0), ('suboffsets', -1)]:
+ code.putln("%s.%s[%d] = %d;" % (dst, attrib, new_ndim, value))
+
+ new_ndim += 1
+ continue
+
+ dim += 1
+ access, packing = self.type.axes[dim]
+
+ if isinstance(index, ExprNodes.SliceNode):
+ # slice, unspecified dimension, or part of ellipsis
+ d = dict(locals())
+ for s in "start stop step".split():
+ idx = getattr(index, s)
+ have_idx = d['have_' + s] = not idx.is_none
+ d[s] = idx.result() if have_idx else "0"
+
+ if not (d['have_start'] or d['have_stop'] or d['have_step']):
+ # full slice (:), simply copy over the extent, stride
+ # and suboffset. Also update suboffset_dim if needed
+ d['access'] = access
+ util_name = "SimpleSlice"
+ else:
+ util_name = "ToughSlice"
+ d['error_goto'] = code.error_goto(index.pos)
+
+ new_ndim += 1
+ else:
+ # normal index
+ idx = index.result()
+
+ indirect = access != 'direct'
+ if indirect:
+ generic = access == 'full'
+ if new_ndim != 0:
+ return error(index.pos,
+ "All preceding dimensions must be "
+ "indexed and not sliced")
+
+ d = dict(
+ locals(),
+ wraparound=int(directives['wraparound']),
+ boundscheck=int(directives['boundscheck']),
+ )
+ if d['boundscheck']:
+ d['error_goto'] = code.error_goto(index.pos)
+ util_name = "SliceIndex"
+
+ _, impl = TempitaUtilityCode.load_as_string(util_name, "MemoryView_C.c", context=d)
+ code.put(impl)
+
+ if suboffset_dim_temp:
+ code.funcstate.release_temp(suboffset_dim_temp[0])
+
+
+def empty_slice(pos):
+ none = ExprNodes.NoneNode(pos)
+ return ExprNodes.SliceNode(pos, start=none,
+ stop=none, step=none)
+
+
+def unellipsify(indices, ndim):
+ result = []
+ seen_ellipsis = False
+ have_slices = False
+
+ newaxes = [newaxis for newaxis in indices if newaxis.is_none]
+ n_indices = len(indices) - len(newaxes)
+
+ for index in indices:
+ if isinstance(index, ExprNodes.EllipsisNode):
+ have_slices = True
+ full_slice = empty_slice(index.pos)
+
+ if seen_ellipsis:
+ result.append(full_slice)
+ else:
+ nslices = ndim - n_indices + 1
+ result.extend([full_slice] * nslices)
+ seen_ellipsis = True
+ else:
+ have_slices = have_slices or index.is_slice or index.is_none
+ result.append(index)
+
+ result_length = len(result) - len(newaxes)
+ if result_length < ndim:
+ have_slices = True
+ nslices = ndim - result_length
+ result.extend([empty_slice(indices[-1].pos)] * nslices)
+
+ return have_slices, result, newaxes
+
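+ # Illustrative note (editor's addition, not part of the original source):
+ # unellipsify() pads the index list out to `ndim` indices: one Ellipsis
+ # expands into as many ':' slices as needed (any later Ellipsis collapses
+ # to a single ':'), and unmentioned trailing dimensions get ':' appended.
+ # For ndim=4, the indices of `obj[0, ..., 1]` thus expand to those of
+ # `obj[0, :, :, 1]`, and `obj[0]` to `obj[0, :, :, :]`. None entries
+ # (newaxis) are kept in `result` and also returned as `newaxes`.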
+
+def get_memoryview_flag(access, packing):
+ if access == 'full' and packing in ('strided', 'follow'):
+ return 'generic'
+ elif access == 'full' and packing == 'contig':
+ return 'generic_contiguous'
+ elif access == 'ptr' and packing in ('strided', 'follow'):
+ return 'indirect'
+ elif access == 'ptr' and packing == 'contig':
+ return 'indirect_contiguous'
+ elif access == 'direct' and packing in ('strided', 'follow'):
+ return 'strided'
+ else:
+ assert (access, packing) == ('direct', 'contig'), (access, packing)
+ return 'contiguous'
+
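+ # Illustrative note (editor's addition, not part of the original source):
+ # The (access, packing) -> flag mapping of get_memoryview_flag() is
+ # exhaustive:
+ #
+ #     ('full',   'strided'/'follow') -> 'generic'
+ #     ('full',   'contig')           -> 'generic_contiguous'
+ #     ('ptr',    'strided'/'follow') -> 'indirect'
+ #     ('ptr',    'contig')           -> 'indirect_contiguous'
+ #     ('direct', 'strided'/'follow') -> 'strided'
+ #     ('direct', 'contig')           -> 'contiguous'
+ #
+ # so e.g. the default axis spec ('direct', 'strided') takes the 'strided'
+ # code path in _generate_buffer_lookup_code().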
+
+def get_is_contig_func_name(contig_type, ndim):
+ assert contig_type in ('C', 'F')
+ return "__pyx_memviewslice_is_contig_%s%d" % (contig_type, ndim)
+
+
+def get_is_contig_utility(contig_type, ndim):
+ assert contig_type in ('C', 'F')
+ C = dict(context, ndim=ndim, contig_type=contig_type)
+ utility = load_memview_c_utility("MemviewSliceCheckContig", C, requires=[is_contig_utility])
+ return utility
+
+
+def slice_iter(slice_type, slice_result, ndim, code):
+ if slice_type.is_c_contig or slice_type.is_f_contig:
+ return ContigSliceIter(slice_type, slice_result, ndim, code)
+ else:
+ return StridedSliceIter(slice_type, slice_result, ndim, code)
+
+
+class SliceIter(object):
+ def __init__(self, slice_type, slice_result, ndim, code):
+ self.slice_type = slice_type
+ self.slice_result = slice_result
+ self.code = code
+ self.ndim = ndim
+
+
+class ContigSliceIter(SliceIter):
+ def start_loops(self):
+ code = self.code
+ code.begin_block()
+
+ type_decl = self.slice_type.dtype.empty_declaration_code()
+
+ total_size = ' * '.join("%s.shape[%d]" % (self.slice_result, i)
+ for i in range(self.ndim))
+ code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size)
+ code.putln("Py_ssize_t __pyx_temp_idx;")
+ code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
+ type_decl, type_decl, self.slice_result))
+ code.putln("for (__pyx_temp_idx = 0; "
+ "__pyx_temp_idx < __pyx_temp_extent; "
+ "__pyx_temp_idx++) {")
+
+ return "__pyx_temp_pointer"
+
+ def end_loops(self):
+ self.code.putln("__pyx_temp_pointer += 1;")
+ self.code.putln("}")
+ self.code.end_block()
+
+
+class StridedSliceIter(SliceIter):
+ def start_loops(self):
+ code = self.code
+ code.begin_block()
+
+ for i in range(self.ndim):
+ t = i, self.slice_result, i
+ code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
+ code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
+ code.putln("char *__pyx_temp_pointer_%d;" % i)
+ code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i)
+
+ code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_result)
+
+ for i in range(self.ndim):
+ if i > 0:
+ code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1))
+
+ code.putln("for (__pyx_temp_idx_%d = 0; "
+ "__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
+ "__pyx_temp_idx_%d++) {" % (i, i, i, i))
+
+ return "__pyx_temp_pointer_%d" % (self.ndim - 1)
+
+ def end_loops(self):
+ code = self.code
+ for i in range(self.ndim - 1, -1, -1):
+ code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i))
+ code.putln("}")
+
+ code.end_block()
+
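+ # Illustrative note (editor's addition, not part of the original source):
+ # For ndim=2, StridedSliceIter emits a loop nest of roughly this shape
+ # (slice result abbreviated to `v`, declarations omitted):
+ #
+ #     __pyx_temp_pointer_0 = v.data;
+ #     for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0;
+ #          __pyx_temp_idx_0++) {
+ #         __pyx_temp_pointer_1 = __pyx_temp_pointer_0;
+ #         for (__pyx_temp_idx_1 = 0; __pyx_temp_idx_1 < __pyx_temp_extent_1;
+ #              __pyx_temp_idx_1++) {
+ #             /* loop body dereferences __pyx_temp_pointer_1 */
+ #             __pyx_temp_pointer_1 += __pyx_temp_stride_1;
+ #         }
+ #         __pyx_temp_pointer_0 += __pyx_temp_stride_0;
+ #     }
+ #
+ # ContigSliceIter instead collapses all dimensions into a single loop over
+ # shape[0] * ... * shape[ndim-1] elements, bumping one typed pointer.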
+
+def copy_c_or_fortran_cname(memview):
+ if memview.is_c_contig:
+ c_or_f = 'c'
+ else:
+ c_or_f = 'f'
+
+ return "__pyx_memoryview_copy_slice_%s_%s" % (
+ memview.specialization_suffix(), c_or_f)
+
+
+def get_copy_new_utility(pos, from_memview, to_memview):
+ if (from_memview.dtype != to_memview.dtype and
+ not (from_memview.dtype.is_const and from_memview.dtype.const_base_type == to_memview.dtype)):
+ error(pos, "dtypes must be the same!")
+ return
+ if len(from_memview.axes) != len(to_memview.axes):
+ error(pos, "number of dimensions must be same")
+ return
+ if not (to_memview.is_c_contig or to_memview.is_f_contig):
+ error(pos, "to_memview must be c or f contiguous.")
+ return
+
+ for (access, packing) in from_memview.axes:
+ if access != 'direct':
+ error(pos, "cannot handle 'full' or 'ptr' access at this time.")
+ return
+
+ if to_memview.is_c_contig:
+ mode = 'c'
+ contig_flag = memview_c_contiguous
+ elif to_memview.is_f_contig:
+ mode = 'fortran'
+ contig_flag = memview_f_contiguous
+
+ return load_memview_c_utility(
+ "CopyContentsUtility",
+ context=dict(
+ context,
+ mode=mode,
+ dtype_decl=to_memview.dtype.empty_declaration_code(),
+ contig_flag=contig_flag,
+ ndim=to_memview.ndim,
+ func_cname=copy_c_or_fortran_cname(to_memview),
+ dtype_is_object=int(to_memview.dtype.is_pyobject)),
+ requires=[copy_contents_new_utility])
+
+
+def get_axes_specs(env, axes):
+ '''
+ get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
+ access is one of 'full', 'ptr' or 'direct'
+ packing is one of 'contig', 'strided' or 'follow'
+ '''
+
+ cythonscope = env.global_scope().context.cython_scope
+ cythonscope.load_cythonscope()
+ viewscope = cythonscope.viewscope
+
+ access_specs = tuple([viewscope.lookup(name)
+ for name in ('full', 'direct', 'ptr')])
+ packing_specs = tuple([viewscope.lookup(name)
+ for name in ('contig', 'strided', 'follow')])
+
+ is_f_contig, is_c_contig = False, False
+ default_access, default_packing = 'direct', 'strided'
+ cf_access, cf_packing = default_access, 'follow'
+
+ axes_specs = []
+ # analyse all axes.
+ for idx, axis in enumerate(axes):
+ if not axis.start.is_none:
+ raise CompileError(axis.start.pos, START_ERR)
+
+ if not axis.stop.is_none:
+ raise CompileError(axis.stop.pos, STOP_ERR)
+
+ if axis.step.is_none:
+ axes_specs.append((default_access, default_packing))
+
+ elif isinstance(axis.step, IntNode):
+ # the packing for the ::1 axis is contiguous,
+ # all others are cf_packing.
+ if axis.step.compile_time_value(env) != 1:
+ raise CompileError(axis.step.pos, STEP_ERR)
+
+ axes_specs.append((cf_access, 'cfcontig'))
+
+ elif isinstance(axis.step, (NameNode, AttributeNode)):
+ entry = _get_resolved_spec(env, axis.step)
+ if entry.name in view_constant_to_access_packing:
+ axes_specs.append(view_constant_to_access_packing[entry.name])
+ else:
+ raise CompileError(axis.step.pos, INVALID_ERR)
+
+ else:
+ raise CompileError(axis.step.pos, INVALID_ERR)
+
+ # First, find out if we have a ::1 somewhere
+ contig_dim = 0
+ is_contig = False
+ for idx, (access, packing) in enumerate(axes_specs):
+ if packing == 'cfcontig':
+ if is_contig:
+ raise CompileError(axes[idx].step.pos, BOTH_CF_ERR)
+
+ contig_dim = idx
+ axes_specs[idx] = (access, 'contig')
+ is_contig = True
+
+ if is_contig:
+ # We have a ::1 somewhere, see if we're C or Fortran contiguous
+ if contig_dim == len(axes) - 1:
+ is_c_contig = True
+ else:
+ is_f_contig = True
+
+ if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'):
+ raise CompileError(axes[contig_dim].pos,
+ "Fortran contiguous specifier must follow an indirect dimension")
+
+ if is_c_contig:
+ # Contiguous in the last dimension, find the last indirect dimension
+ contig_dim = -1
+ for idx, (access, packing) in enumerate(reversed(axes_specs)):
+ if access in ('ptr', 'full'):
+ contig_dim = len(axes) - idx - 1
+
+ # Replace 'strided' with 'follow' for any dimension following the last
+ # indirect dimension, the first dimension or the dimension following
+ # the ::1.
+ # int[::indirect, ::1, :, :]
+ # ^ ^
+ # int[::indirect, :, :, ::1]
+ # ^ ^
+ start = contig_dim + 1
+ stop = len(axes) - is_c_contig
+ for idx, (access, packing) in enumerate(axes_specs[start:stop]):
+ idx = contig_dim + 1 + idx
+ if access != 'direct':
+ raise CompileError(axes[idx].pos,
+ "Indirect dimension may not follow "
+ "Fortran contiguous dimension")
+ if packing == 'contig':
+ raise CompileError(axes[idx].pos,
+ "Dimension may not be contiguous")
+ axes_specs[idx] = (access, cf_packing)
+
+ if is_c_contig:
+ # For C contiguity, we need to fix the 'contig' dimension
+ # after the loop
+ a, p = axes_specs[-1]
+ axes_specs[-1] = a, 'contig'
+
+ validate_axes_specs([axis.start.pos for axis in axes],
+ axes_specs,
+ is_c_contig,
+ is_f_contig)
+
+ return axes_specs
+
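+ # Illustrative note (editor's addition, not part of the original source):
+ # Some example declarations and the specs get_axes_specs() derives:
+ #
+ #     int[:, :]   -> [('direct', 'strided'), ('direct', 'strided')]
+ #     int[:, ::1] -> [('direct', 'follow'),  ('direct', 'contig')]  # C contig
+ #     int[::1, :] -> [('direct', 'contig'),  ('direct', 'follow')]  # F contig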
+
+def validate_axes(pos, axes):
+ if len(axes) >= Options.buffer_max_dims:
+ error(pos, "More dimensions than the maximum number"
+ " of buffer dimensions were used.")
+ return False
+
+ return True
+
+
+def is_cf_contig(specs):
+ is_c_contig = is_f_contig = False
+
+ if len(specs) == 1 and specs == [('direct', 'contig')]:
+ is_c_contig = True
+
+ elif (specs[-1] == ('direct','contig') and
+ all(axis == ('direct','follow') for axis in specs[:-1])):
+ # c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
+ is_c_contig = True
+
+ elif (len(specs) > 1 and
+ specs[0] == ('direct','contig') and
+ all(axis == ('direct','follow') for axis in specs[1:])):
+ # f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
+ is_f_contig = True
+
+ return is_c_contig, is_f_contig
+
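+ # Illustrative note (editor's addition, not part of the original source):
+ # is_cf_contig() only inspects the spec pattern: a trailing
+ # ('direct', 'contig') preceded solely by ('direct', 'follow') axes is
+ # C contiguous, the mirrored pattern is Fortran contiguous, and the
+ # single-axis [('direct', 'contig')] counts as C contiguous. Anything
+ # else (e.g. any 'strided', 'ptr' or 'full' axis) is neither.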
+
+def get_mode(specs):
+ is_c_contig, is_f_contig = is_cf_contig(specs)
+
+ if is_c_contig:
+ return 'c'
+ elif is_f_contig:
+ return 'fortran'
+
+ for access, packing in specs:
+ if access in ('ptr', 'full'):
+ return 'full'
+
+ return 'strided'
+
+view_constant_to_access_packing = {
+ 'generic': ('full', 'strided'),
+ 'strided': ('direct', 'strided'),
+ 'indirect': ('ptr', 'strided'),
+ 'generic_contiguous': ('full', 'contig'),
+ 'contiguous': ('direct', 'contig'),
+ 'indirect_contiguous': ('ptr', 'contig'),
+}
+
+def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
+
+ packing_specs = ('contig', 'strided', 'follow')
+ access_specs = ('direct', 'ptr', 'full')
+
+ # is_c_contig, is_f_contig = is_cf_contig(specs)
+
+ has_contig = has_follow = has_strided = has_generic_contig = False
+
+ last_indirect_dimension = -1
+ for idx, (access, packing) in enumerate(specs):
+ if access == 'ptr':
+ last_indirect_dimension = idx
+
+ for idx, (pos, (access, packing)) in enumerate(zip(positions, specs)):
+
+ if not (access in access_specs and
+ packing in packing_specs):
+ raise CompileError(pos, "Invalid axes specification.")
+
+ if packing == 'strided':
+ has_strided = True
+ elif packing == 'contig':
+ if has_contig:
+ raise CompileError(pos, "Only one direct contiguous "
+ "axis may be specified.")
+
+ valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1
+ if idx not in valid_contig_dims and access != 'ptr':
+ if last_indirect_dimension + 1 != len(specs) - 1:
+ dims = "dimensions %d and %d" % valid_contig_dims
+ else:
+ dims = "dimension %d" % valid_contig_dims[0]
+
+ raise CompileError(pos, "Only %s may be contiguous and direct" % dims)
+
+ has_contig = access != 'ptr'
+ elif packing == 'follow':
+ if has_strided:
+ raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
+ if not (is_c_contig or is_f_contig):
+ raise CompileError(pos, "Invalid use of the follow specifier.")
+
+ if access in ('ptr', 'full'):
+ has_strided = False
+
+def _get_resolved_spec(env, spec):
+ # spec must be a NameNode or an AttributeNode
+ if isinstance(spec, NameNode):
+ return _resolve_NameNode(env, spec)
+ elif isinstance(spec, AttributeNode):
+ return _resolve_AttributeNode(env, spec)
+ else:
+ raise CompileError(spec.pos, INVALID_ERR)
+
+def _resolve_NameNode(env, node):
+ try:
+ resolved_name = env.lookup(node.name).name
+ except AttributeError:
+ raise CompileError(node.pos, INVALID_ERR)
+
+ viewscope = env.global_scope().context.cython_scope.viewscope
+ entry = viewscope.lookup(resolved_name)
+ if entry is None:
+ raise CompileError(node.pos, NOT_CIMPORTED_ERR)
+
+ return entry
+
+def _resolve_AttributeNode(env, node):
+ path = []
+ while isinstance(node, AttributeNode):
+ path.insert(0, node.attribute)
+ node = node.obj
+ if isinstance(node, NameNode):
+ path.insert(0, node.name)
+ else:
+ raise CompileError(node.pos, EXPR_ERR)
+ modnames = path[:-1]
+ # there must be at least one module name; otherwise this is not an AttributeNode.
+ assert modnames
+
+ scope = env
+ for modname in modnames:
+ mod = scope.lookup(modname)
+ if not mod or not mod.as_module:
+ raise CompileError(
+ node.pos, "undeclared name not builtin: %s" % modname)
+ scope = mod.as_module
+
+ entry = scope.lookup(path[-1])
+ if not entry:
+ raise CompileError(node.pos, "No such attribute '%s'" % path[-1])
+
+ return entry
+
+#
+### Utility loading
+#
+
+def load_memview_cy_utility(util_code_name, context=None, **kwargs):
+ return CythonUtilityCode.load(util_code_name, "MemoryView.pyx",
+ context=context, **kwargs)
+
+def load_memview_c_utility(util_code_name, context=None, **kwargs):
+ if context is None:
+ return UtilityCode.load(util_code_name, "MemoryView_C.c", **kwargs)
+ else:
+ return TempitaUtilityCode.load(util_code_name, "MemoryView_C.c",
+ context=context, **kwargs)
+
+def use_cython_array_utility_code(env):
+ cython_scope = env.global_scope().context.cython_scope
+ cython_scope.load_cythonscope()
+ cython_scope.viewscope.lookup('array_cwrapper').used = True
+
+context = {
+ 'memview_struct_name': memview_objstruct_cname,
+ 'max_dims': Options.buffer_max_dims,
+ 'memviewslice_name': memviewslice_cname,
+ 'memslice_init': memslice_entry_init,
+}
+memviewslice_declare_code = load_memview_c_utility(
+ "MemviewSliceStruct",
+ context=context,
+ requires=[])
+
+atomic_utility = load_memview_c_utility("Atomics", context)
+
+memviewslice_init_code = load_memview_c_utility(
+ "MemviewSliceInit",
+ context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims),
+ requires=[memviewslice_declare_code,
+ atomic_utility],
+)
+
+memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")
+
+typeinfo_to_format_code = load_memview_cy_utility(
+ "BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])
+
+is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context)
+overlapping_utility = load_memview_c_utility("OverlappingSlices", context)
+copy_contents_new_utility = load_memview_c_utility(
+ "MemviewSliceCopyTemplate",
+ context,
+ requires=[], # require cython_array_utility_code
+)
+
+view_utility_code = load_memview_cy_utility(
+ "View.MemoryView",
+ context=context,
+ requires=[Buffer.GetAndReleaseBufferUtilityCode(),
+ Buffer.buffer_struct_declare_code,
+ Buffer.buffer_formats_declare_code,
+ memviewslice_init_code,
+ is_contig_utility,
+ overlapping_utility,
+ copy_contents_new_utility,
+ ModuleNode.capsule_utility_code],
+)
+view_utility_whitelist = ('array', 'memoryview', 'array_cwrapper',
+ 'generic', 'strided', 'indirect', 'contiguous',
+ 'indirect_contiguous')
+
+memviewslice_declare_code.requires.append(view_utility_code)
+copy_contents_new_utility.requires.append(view_utility_code)
diff --git a/contrib/tools/cython/Cython/Compiler/ModuleNode.py b/contrib/tools/cython/Cython/Compiler/ModuleNode.py
new file mode 100644
index 0000000000..a9b1a492c4
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/ModuleNode.py
@@ -0,0 +1,3223 @@
+#
+# Module parse tree node
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
+ error=object, warning=object, py_object_type=object, UtilityCode=object,
+ EncodedString=object, re=object)
+
+from collections import defaultdict
+import json
+import operator
+import os
+import re
+
+from .PyrexTypes import CPtrType
+from . import Future
+from . import Annotate
+from . import Code
+from . import Naming
+from . import Nodes
+from . import Options
+from . import TypeSlots
+from . import PyrexTypes
+from . import Pythran
+
+from .Errors import error, warning
+from .PyrexTypes import py_object_type
+from ..Utils import open_new_file, replace_suffix, decode_filename, build_hex_version
+from .Code import UtilityCode, IncludeCode
+from .StringEncoding import EncodedString
+from .Pythran import has_np_pythran
+
+def check_c_declarations_pxd(module_node):
+ module_node.scope.check_c_classes_pxd()
+ return module_node
+
+
+def check_c_declarations(module_node):
+ module_node.scope.check_c_classes()
+ module_node.scope.check_c_functions()
+ return module_node
+
+
+def generate_c_code_config(env, options):
+ if Options.annotate or options.annotate:
+ emit_linenums = False
+ else:
+ emit_linenums = options.emit_linenums
+
+ return Code.CCodeConfig(
+ emit_linenums=emit_linenums,
+ emit_code_comments=env.directives['emit_code_comments'],
+ c_line_in_traceback=options.c_line_in_traceback)
+
+
+class ModuleNode(Nodes.Node, Nodes.BlockNode):
+ # doc string or None
+ # body StatListNode
+ #
+ # referenced_modules [ModuleScope]
+ # full_module_name string
+ #
+ # scope The module scope.
+ # compilation_source A CompilationSource (see Main)
+ # directives Top-level compiler directives
+
+ child_attrs = ["body"]
+ directives = None
+
+ def merge_in(self, tree, scope, merge_scope=False):
+ # Merge in the contents of another tree and, optionally, its scope.
+ # With the current implementation below, this must be done immediately
+ # prior to code generation.
+ #
+ # Note: This way of doing it seems strange -- I believe the
+ # right concept is to split ModuleNode into a ModuleNode and a
+ # CodeGenerator, and tell that CodeGenerator to generate code
+ # from multiple sources.
+ assert isinstance(self.body, Nodes.StatListNode)
+ if isinstance(tree, Nodes.StatListNode):
+ self.body.stats.extend(tree.stats)
+ else:
+ self.body.stats.append(tree)
+
+ self.scope.utility_code_list.extend(scope.utility_code_list)
+
+ for inc in scope.c_includes.values():
+ self.scope.process_include(inc)
+
+ def extend_if_not_in(L1, L2):
+ for x in L2:
+ if x not in L1:
+ L1.append(x)
+
+ extend_if_not_in(self.scope.included_files, scope.included_files)
+
+ if merge_scope:
+ # Ensure that we don't generate import code for these entries!
+ for entry in scope.c_class_entries:
+ entry.type.module_name = self.full_module_name
+ entry.type.scope.directives["internal"] = True
+
+ self.scope.merge_in(scope)
+
+ def analyse_declarations(self, env):
+ if has_np_pythran(env):
+ Pythran.include_pythran_generic(env)
+ if self.directives:
+ env.old_style_globals = self.directives['old_style_globals']
+ if not Options.docstrings:
+ env.doc = self.doc = None
+ elif Options.embed_pos_in_docstring:
+ env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
+ if self.doc is not None:
+ env.doc = EncodedString(env.doc + u'\n' + self.doc)
+ env.doc.encoding = self.doc.encoding
+ else:
+ env.doc = self.doc
+ env.directives = self.directives
+
+ self.body.analyse_declarations(env)
+
+ def prepare_utility_code(self):
+ # Prepare any utility code that must be created before code generation,
+ # specifically CythonUtilityCode.
+ env = self.scope
+ if env.has_import_star:
+ self.create_import_star_conversion_utility_code(env)
+ for name, entry in sorted(env.entries.items()):
+ if (entry.create_wrapper and entry.scope is env
+ and entry.is_type and entry.type.is_enum):
+ entry.type.create_type_wrapper(env)
+
+ def process_implementation(self, options, result):
+ env = self.scope
+ env.return_type = PyrexTypes.c_void_type
+ self.referenced_modules = []
+ self.find_referenced_modules(env, self.referenced_modules, {})
+ self.sort_cdef_classes(env)
+ self.generate_c_code(env, options, result)
+ self.generate_h_code(env, options, result)
+ self.generate_api_code(env, options, result)
+
+ def has_imported_c_functions(self):
+ for module in self.referenced_modules:
+ for entry in module.cfunc_entries:
+ if entry.defined_in_pxd:
+ return 1
+ return 0
+
+ def generate_h_code(self, env, options, result):
+ def h_entries(entries, api=0, pxd=0):
+ return [entry for entry in entries
+ if ((entry.visibility == 'public') or
+ (api and entry.api) or
+ (pxd and entry.defined_in_pxd))]
+ h_types = h_entries(env.type_entries, api=1)
+ h_vars = h_entries(env.var_entries)
+ h_funcs = h_entries(env.cfunc_entries)
+ h_extension_types = h_entries(env.c_class_entries)
+ if h_types or h_vars or h_funcs or h_extension_types:
+ result.h_file = replace_suffix(result.c_file, ".h")
+ h_code = Code.CCodeWriter()
+ c_code_config = generate_c_code_config(env, options)
+ Code.GlobalState(h_code, self, c_code_config)
+ if options.generate_pxi:
+ result.i_file = replace_suffix(result.c_file, ".pxi")
+ i_code = Code.PyrexCodeWriter(result.i_file)
+ else:
+ i_code = None
+
+ h_code.put_generated_by()
+ h_guard = Naming.h_guard_prefix + self.api_name(env)
+ h_code.put_h_guard(h_guard)
+ h_code.putln("")
+ h_code.putln('#include "Python.h"')
+ self.generate_type_header_code(h_types, h_code)
+ if options.capi_reexport_cincludes:
+ self.generate_includes(env, [], h_code)
+ h_code.putln("")
+ api_guard = Naming.api_guard_prefix + self.api_name(env)
+ h_code.putln("#ifndef %s" % api_guard)
+ h_code.putln("")
+ self.generate_extern_c_macro_definition(h_code)
+ h_code.putln("")
+ self.generate_dl_import_macro(h_code)
+ if h_extension_types:
+ h_code.putln("")
+ for entry in h_extension_types:
+ self.generate_cclass_header_code(entry.type, h_code)
+ if i_code:
+ self.generate_cclass_include_code(entry.type, i_code)
+ if h_funcs:
+ h_code.putln("")
+ for entry in h_funcs:
+ self.generate_public_declaration(entry, h_code, i_code)
+ if h_vars:
+ h_code.putln("")
+ for entry in h_vars:
+ self.generate_public_declaration(entry, h_code, i_code)
+ h_code.putln("")
+ h_code.putln("#endif /* !%s */" % api_guard)
+ h_code.putln("")
+ h_code.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */")
+ h_code.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */")
+ h_code.putln("")
+ h_code.putln("#if PY_MAJOR_VERSION < 3")
+ init_name = 'init' + (options.init_suffix or env.module_name)
+ h_code.putln("PyMODINIT_FUNC %s(void);" % init_name)
+ h_code.putln("#else")
+ h_code.putln("PyMODINIT_FUNC %s(void);" % self.mod_init_func_cname('PyInit', env, options))
+ h_code.putln("#endif")
+ h_code.putln("")
+ h_code.putln("#endif /* !%s */" % h_guard)
+
+ f = open_new_file(result.h_file)
+ try:
+ h_code.copyto(f)
+ finally:
+ f.close()
+
+ def generate_public_declaration(self, entry, h_code, i_code):
+ h_code.putln("%s %s;" % (
+ Naming.extern_c_macro,
+ entry.type.declaration_code(entry.cname)))
+ if i_code:
+ i_code.putln("cdef extern %s" % (
+ entry.type.declaration_code(entry.cname, pyrex=1)))
+
+ def api_name(self, env):
+ return env.qualified_name.replace(".", "__")
+
+ def generate_api_code(self, env, options, result):
+ def api_entries(entries, pxd=0):
+ return [entry for entry in entries
+ if entry.api or (pxd and entry.defined_in_pxd)]
+ api_vars = api_entries(env.var_entries)
+ api_funcs = api_entries(env.cfunc_entries)
+ api_extension_types = api_entries(env.c_class_entries)
+ if api_vars or api_funcs or api_extension_types:
+ result.api_file = replace_suffix(result.c_file, "_api.h")
+ h_code = Code.CCodeWriter()
+ c_code_config = generate_c_code_config(env, options)
+ Code.GlobalState(h_code, self, c_code_config)
+ h_code.put_generated_by()
+ api_guard = Naming.api_guard_prefix + self.api_name(env)
+ h_code.put_h_guard(api_guard)
+ # Work around https://bugs.python.org/issue4709
+ h_code.putln('#ifdef __MINGW64__')
+ h_code.putln('#define MS_WIN64')
+ h_code.putln('#endif')
+
+ h_code.putln('#include "Python.h"')
+ if result.h_file:
+ h_code.putln('#include "%s"' % os.path.basename(result.h_file))
+ if api_extension_types:
+ h_code.putln("")
+ for entry in api_extension_types:
+ type = entry.type
+ h_code.putln("static PyTypeObject *%s = 0;" % type.typeptr_cname)
+ h_code.putln("#define %s (*%s)" % (
+ type.typeobj_cname, type.typeptr_cname))
+ if api_funcs:
+ h_code.putln("")
+ for entry in api_funcs:
+ type = CPtrType(entry.type)
+ cname = env.mangle(Naming.func_prefix_api, entry.name)
+ h_code.putln("static %s = 0;" % type.declaration_code(cname))
+ h_code.putln("#define %s %s" % (entry.name, cname))
+ if api_vars:
+ h_code.putln("")
+ for entry in api_vars:
+ type = CPtrType(entry.type)
+ cname = env.mangle(Naming.varptr_prefix_api, entry.name)
+ h_code.putln("static %s = 0;" % type.declaration_code(cname))
+ h_code.putln("#define %s (*%s)" % (entry.name, cname))
+ h_code.put(UtilityCode.load_as_string("PyIdentifierFromString", "ImportExport.c")[0])
+ if api_vars:
+ h_code.put(UtilityCode.load_as_string("VoidPtrImport", "ImportExport.c")[1])
+ if api_funcs:
+ h_code.put(UtilityCode.load_as_string("FunctionImport", "ImportExport.c")[1])
+ if api_extension_types:
+ h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[0])
+ h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[1])
+ h_code.putln("")
+ h_code.putln("static int import_%s(void) {" % self.api_name(env))
+ h_code.putln("PyObject *module = 0;")
+ h_code.putln('module = PyImport_ImportModule("%s");' % env.qualified_name)
+ h_code.putln("if (!module) goto bad;")
+ for entry in api_funcs:
+ cname = env.mangle(Naming.func_prefix_api, entry.name)
+ sig = entry.type.signature_string()
+ h_code.putln(
+ 'if (__Pyx_ImportFunction(module, "%s", (void (**)(void))&%s, "%s") < 0) goto bad;'
+ % (entry.name, cname, sig))
+ for entry in api_vars:
+ cname = env.mangle(Naming.varptr_prefix_api, entry.name)
+ sig = entry.type.empty_declaration_code()
+ h_code.putln(
+ 'if (__Pyx_ImportVoidPtr(module, "%s", (void **)&%s, "%s") < 0) goto bad;'
+ % (entry.name, cname, sig))
+ with ModuleImportGenerator(h_code, imported_modules={env.qualified_name: 'module'}) as import_generator:
+ for entry in api_extension_types:
+ self.generate_type_import_call(entry.type, h_code, import_generator, error_code="goto bad;")
+ h_code.putln("Py_DECREF(module); module = 0;")
+ h_code.putln("return 0;")
+ h_code.putln("bad:")
+ h_code.putln("Py_XDECREF(module);")
+ h_code.putln("return -1;")
+ h_code.putln("}")
+ h_code.putln("")
+ h_code.putln("#endif /* !%s */" % api_guard)
+
+ f = open_new_file(result.api_file)
+ try:
+ h_code.copyto(f)
+ finally:
+ f.close()
+
+ def generate_cclass_header_code(self, type, h_code):
+ h_code.putln("%s %s %s;" % (
+ Naming.extern_c_macro,
+ PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
+ type.typeobj_cname))
+
+ def generate_cclass_include_code(self, type, i_code):
+ i_code.putln("cdef extern class %s.%s:" % (
+ type.module_name, type.name))
+ i_code.indent()
+ var_entries = type.scope.var_entries
+ if var_entries:
+ for entry in var_entries:
+ i_code.putln("cdef %s" % (
+ entry.type.declaration_code(entry.cname, pyrex=1)))
+ else:
+ i_code.putln("pass")
+ i_code.dedent()
+
+ def generate_c_code(self, env, options, result):
+ modules = self.referenced_modules
+
+ if Options.annotate or options.annotate:
+ rootwriter = Annotate.AnnotationCCodeWriter()
+ else:
+ rootwriter = Code.CCodeWriter()
+
+ c_code_config = generate_c_code_config(env, options)
+
+ globalstate = Code.GlobalState(
+ rootwriter, self,
+ code_config=c_code_config,
+ common_utility_include_dir=options.common_utility_include_dir,
+ )
+ globalstate.initialize_main_c_code()
+ h_code = globalstate['h_code']
+
+ self.generate_module_preamble(env, options, modules, result.embedded_metadata, h_code)
+
+ globalstate.module_pos = self.pos
+ globalstate.directives = self.directives
+
+ globalstate.use_utility_code(refnanny_utility_code)
+
+ code = globalstate['before_global_var']
+ code.putln('#define __Pyx_MODULE_NAME "%s"' % self.full_module_name)
+ module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
+ code.putln("extern int %s;" % module_is_main)
+ code.putln("int %s = 0;" % module_is_main)
+ code.putln("")
+ code.putln("/* Implementation of '%s' */" % env.qualified_name)
+
+ code = globalstate['late_includes']
+ code.putln("/* Late includes */")
+ self.generate_includes(env, modules, code, early=False)
+
+ code = globalstate['all_the_rest']
+
+ self.generate_cached_builtins_decls(env, code)
+ self.generate_lambda_definitions(env, code)
+ # generate normal variable and function definitions
+ self.generate_variable_definitions(env, code)
+
+ self.body.generate_function_definitions(env, code)
+
+ code.mark_pos(None)
+ self.generate_typeobj_definitions(env, code)
+ self.generate_method_table(env, code)
+ if env.has_import_star:
+ self.generate_import_star(env, code)
+ self.generate_pymoduledef_struct(env, options, code)
+
+ # initialise the macro to reduce the code size of one-time functionality
+ code.putln(UtilityCode.load_as_string("SmallCodeConfig", "ModuleSetupCode.c")[0].strip())
+
+ # init_globals is inserted before this
+ self.generate_module_init_func(modules[:-1], env, options, globalstate['init_module'])
+ self.generate_module_cleanup_func(env, globalstate['cleanup_module'])
+ if Options.embed:
+ self.generate_main_method(env, globalstate['main_method'])
+ self.generate_filename_table(globalstate['filename_table'])
+
+ self.generate_declarations_for_modules(env, modules, globalstate)
+ h_code.write('\n')
+
+ for utilcode in env.utility_code_list[:]:
+ globalstate.use_utility_code(utilcode)
+ globalstate.finalize_main_c_code()
+
+ f = open_new_file(result.c_file)
+ try:
+ rootwriter.copyto(f)
+ finally:
+ f.close()
+ result.c_file_generated = 1
+ if options.gdb_debug:
+ self._serialize_lineno_map(env, rootwriter)
+ if Options.annotate or options.annotate:
+ self._generate_annotations(rootwriter, result, options)
+
+ def _generate_annotations(self, rootwriter, result, options):
+ self.annotate(rootwriter)
+
+ coverage_xml_filename = Options.annotate_coverage_xml or options.annotate_coverage_xml
+ if coverage_xml_filename and os.path.exists(coverage_xml_filename):
+ try:
+ import xml.etree.cElementTree as ET
+ except ImportError:
+ import xml.etree.ElementTree as ET
+ coverage_xml = ET.parse(coverage_xml_filename).getroot()
+ if hasattr(coverage_xml, 'iter'):
+ iterator = coverage_xml.iter() # Python 2.7 & 3.2+
+ else:
+ iterator = coverage_xml.getiterator()
+ for el in iterator:
+ el.tail = None # save some memory
+ else:
+ coverage_xml = None
+
+ rootwriter.save_annotation(result.main_source_file, result.c_file, coverage_xml=coverage_xml)
+
+ # if we included files, additionally generate one annotation file for each
+ if not self.scope.included_files:
+ return
+
+ search_include_file = self.scope.context.search_include_directories
+ target_dir = os.path.abspath(os.path.dirname(result.c_file))
+ for included_file in self.scope.included_files:
+ target_file = os.path.abspath(os.path.join(target_dir, included_file))
+ target_file_dir = os.path.dirname(target_file)
+ if not target_file_dir.startswith(target_dir):
+ # any other directories may not be writable => avoid trying
+ continue
+ source_file = search_include_file(included_file, "", self.pos, include=True)
+ if not source_file:
+ continue
+ if target_file_dir != target_dir and not os.path.exists(target_file_dir):
+ try:
+ os.makedirs(target_file_dir)
+ except OSError as e:
+ import errno
+ if e.errno != errno.EEXIST:
+ raise
+ rootwriter.save_annotation(source_file, target_file, coverage_xml=coverage_xml)
+
+ def _serialize_lineno_map(self, env, ccodewriter):
+ tb = env.context.gdb_debug_outputwriter
+ markers = ccodewriter.buffer.allmarkers()
+
+ d = defaultdict(list)
+ for c_lineno, cython_lineno in enumerate(markers):
+ if cython_lineno > 0:
+ d[cython_lineno].append(c_lineno + 1)
+
+ tb.start('LineNumberMapping')
+ for cython_lineno, c_linenos in sorted(d.items()):
+ tb.add_entry(
+ 'LineNumber',
+ c_linenos=' '.join(map(str, c_linenos)),
+ cython_lineno=str(cython_lineno),
+ )
+ tb.end('LineNumberMapping')
+ tb.serialize()
+
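+ # Illustrative note (editor's addition, not part of the original source):
+ # _serialize_lineno_map() inverts the marker list: `markers` gives the
+ # Cython line for each C line, while `d` collects all C lines per Cython
+ # line. Assuming the gdb debug writer renders entries as XML elements
+ # (the usual cython_debug output format), a Cython line 12 that produced
+ # C lines 340 and 341 might serialize as:
+ #
+ #     <LineNumber c_linenos="340 341" cython_lineno="12"/>
+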
+ def find_referenced_modules(self, env, module_list, modules_seen):
+ if env not in modules_seen:
+ modules_seen[env] = 1
+ for imported_module in env.cimported_modules:
+ self.find_referenced_modules(imported_module, module_list, modules_seen)
+ module_list.append(env)
+
+ def sort_types_by_inheritance(self, type_dict, type_order, getkey):
+ # copy the types into a list, moving each parent type before
+ # its first child
+ type_list = []
+ for i, key in enumerate(type_order):
+ new_entry = type_dict[key]
+
+ # collect all base classes to check for children
+ hierarchy = set()
+ base = new_entry
+ while base:
+ base_type = base.type.base_type
+ if not base_type:
+ break
+ base_key = getkey(base_type)
+ hierarchy.add(base_key)
+ base = type_dict.get(base_key)
+ new_entry.base_keys = hierarchy
+
+ # find the first (sub-)subclass and insert before that
+ for j in range(i):
+ entry = type_list[j]
+ if key in entry.base_keys:
+ type_list.insert(j, new_entry)
+ break
+ else:
+ type_list.append(new_entry)
+ return type_list
+
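+ # Illustrative note (editor's addition, not part of the original source):
+ # The insertion rule above keeps the original order except that a parent
+ # type is hoisted in front of its first already-placed subclass. E.g. for
+ # hypothetical cdef classes in declaration order B(A), C, A, the result
+ # is [A, B, C]: A is inserted before B because B's collected base keys
+ # contain A's key.
+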
+ def sort_type_hierarchy(self, module_list, env):
+ # poor developer's OrderedDict
+ vtab_dict, vtab_dict_order = {}, []
+ vtabslot_dict, vtabslot_dict_order = {}, []
+
+ for module in module_list:
+ for entry in module.c_class_entries:
+ if entry.used and not entry.in_cinclude:
+ type = entry.type
+ key = type.vtabstruct_cname
+ if not key:
+ continue
+ if key in vtab_dict:
+ # FIXME: this should *never* happen, but apparently it does
+ # for Cython generated utility code
+ from .UtilityCode import NonManglingModuleScope
+ assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope)
+ assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope)
+ else:
+ vtab_dict[key] = entry
+ vtab_dict_order.append(key)
+ all_defined_here = module is env
+ for entry in module.type_entries:
+ if entry.used and (all_defined_here or entry.defined_in_pxd):
+ type = entry.type
+ if type.is_extension_type and not entry.in_cinclude:
+ type = entry.type
+ key = type.objstruct_cname
+ assert key not in vtabslot_dict, key
+ vtabslot_dict[key] = entry
+ vtabslot_dict_order.append(key)
+
+ def vtabstruct_cname(entry_type):
+ return entry_type.vtabstruct_cname
+ vtab_list = self.sort_types_by_inheritance(
+ vtab_dict, vtab_dict_order, vtabstruct_cname)
+
+ def objstruct_cname(entry_type):
+ return entry_type.objstruct_cname
+ vtabslot_list = self.sort_types_by_inheritance(
+ vtabslot_dict, vtabslot_dict_order, objstruct_cname)
+
+ return (vtab_list, vtabslot_list)
+
+ def sort_cdef_classes(self, env):
+ key_func = operator.attrgetter('objstruct_cname')
+ entry_dict, entry_order = {}, []
+ for entry in env.c_class_entries:
+ key = key_func(entry.type)
+ assert key not in entry_dict, key
+ entry_dict[key] = entry
+ entry_order.append(key)
+ env.c_class_entries[:] = self.sort_types_by_inheritance(
+ entry_dict, entry_order, key_func)
+
+ def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code):
+ # TODO: Why are these separated out?
+ for entry in vtabslot_list:
+ self.generate_objstruct_predeclaration(entry.type, code)
+ vtabslot_entries = set(vtabslot_list)
+ ctuple_names = set()
+ for module in modules:
+ definition = module is env
+ type_entries = []
+ for entry in module.type_entries:
+ if entry.type.is_ctuple and entry.used:
+ if entry.name not in ctuple_names:
+ ctuple_names.add(entry.name)
+ type_entries.append(entry)
+ elif definition or entry.defined_in_pxd:
+ type_entries.append(entry)
+ type_entries = [t for t in type_entries if t not in vtabslot_entries]
+ self.generate_type_header_code(type_entries, code)
+ for entry in vtabslot_list:
+ self.generate_objstruct_definition(entry.type, code)
+ self.generate_typeobj_predeclaration(entry, code)
+ for entry in vtab_list:
+ self.generate_typeobj_predeclaration(entry, code)
+ self.generate_exttype_vtable_struct(entry, code)
+ self.generate_exttype_vtabptr_declaration(entry, code)
+ self.generate_exttype_final_methods_declaration(entry, code)
+
+ def generate_declarations_for_modules(self, env, modules, globalstate):
+ typecode = globalstate['type_declarations']
+ typecode.putln("")
+ typecode.putln("/*--- Type declarations ---*/")
+ # This works around the fact that array.h isn't part of the C-API,
+ # but we need to declare it earlier than the utility code.
+ if 'cpython.array' in [m.qualified_name for m in modules]:
+ typecode.putln('#ifndef _ARRAYARRAY_H')
+ typecode.putln('struct arrayobject;')
+ typecode.putln('typedef struct arrayobject arrayobject;')
+ typecode.putln('#endif')
+ vtab_list, vtabslot_list = self.sort_type_hierarchy(modules, env)
+ self.generate_type_definitions(
+ env, modules, vtab_list, vtabslot_list, typecode)
+ modulecode = globalstate['module_declarations']
+ for module in modules:
+ defined_here = module is env
+ modulecode.putln("")
+ modulecode.putln("/* Module declarations from '%s' */" % module.qualified_name)
+ self.generate_c_class_declarations(module, modulecode, defined_here)
+ self.generate_cvariable_declarations(module, modulecode, defined_here)
+ self.generate_cfunction_declarations(module, modulecode, defined_here)
+
+ def _put_setup_code(self, code, name):
+ code.put(UtilityCode.load_as_string(name, "ModuleSetupCode.c")[1])
+
+ def generate_module_preamble(self, env, options, cimported_modules, metadata, code):
+ code.put_generated_by()
+ if metadata:
+ code.putln("/* BEGIN: Cython Metadata")
+ code.putln(json.dumps(metadata, indent=4, sort_keys=True))
+ code.putln("END: Cython Metadata */")
+ code.putln("")
+
+ code.putln("#ifndef PY_SSIZE_T_CLEAN")
+ code.putln("#define PY_SSIZE_T_CLEAN")
+ code.putln("#endif /* PY_SSIZE_T_CLEAN */")
+
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.INITIAL:
+ inc.write(code)
+ code.putln("#ifndef Py_PYTHON_H")
+ code.putln(" #error Python headers needed to compile C extensions, "
+ "please install development version of Python.")
+ code.putln("#elif PY_VERSION_HEX < 0x02060000 || "
+ "(0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)")
+ code.putln(" #error Cython requires Python 2.6+ or Python 3.3+.")
+ code.putln("#else")
+ code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
+
+ from .. import __version__
+ code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_'))
+ code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__))
+ code.putln("#define CYTHON_FUTURE_DIVISION %d" % (
+ Future.division in env.context.future_directives))
+
+ self._put_setup_code(code, "CModulePreamble")
+ if env.context.options.cplus:
+ self._put_setup_code(code, "CppInitCode")
+ else:
+ self._put_setup_code(code, "CInitCode")
+ self._put_setup_code(code, "PythonCompatibility")
+ self._put_setup_code(code, "MathInitCode")
+
+ # Using "(void)cname" to prevent "unused" warnings.
+ if options.c_line_in_traceback:
+ cinfo = "%s = %s; (void)%s; " % (Naming.clineno_cname, Naming.line_c_macro, Naming.clineno_cname)
+ else:
+ cinfo = ""
+ code.putln("#define __PYX_MARK_ERR_POS(f_index, lineno) \\")
+ code.putln(" { %s = %s[f_index]; (void)%s; %s = lineno; (void)%s; %s}" % (
+ Naming.filename_cname, Naming.filetable_cname, Naming.filename_cname,
+ Naming.lineno_cname, Naming.lineno_cname,
+ cinfo
+ ))
+ code.putln("#define __PYX_ERR(f_index, lineno, Ln_error) \\")
+ code.putln(" { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }")
+
+ code.putln("")
+ self.generate_extern_c_macro_definition(code)
+ code.putln("")
+
+ code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
+ code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
+ code.putln("/* Early includes */")
+ self.generate_includes(env, cimported_modules, code, late=False)
+ code.putln("")
+ code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
+ code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
+ code.putln("#endif")
+ code.putln("")
+
+ if env.directives['ccomplex']:
+ code.putln("")
+ code.putln("#if !defined(CYTHON_CCOMPLEX)")
+ code.putln("#define CYTHON_CCOMPLEX 1")
+ code.putln("#endif")
+ code.putln("")
+ code.put(UtilityCode.load_as_string("UtilityFunctionPredeclarations", "ModuleSetupCode.c")[0])
+
+ c_string_type = env.directives['c_string_type']
+ c_string_encoding = env.directives['c_string_encoding']
+ if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding:
+ error(self.pos, "a default encoding must be provided if c_string_type is not a byte type")
+ code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII %s' % int(c_string_encoding == 'ascii'))
+ code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 %s' %
+ int(c_string_encoding.replace('-', '').lower() == 'utf8'))
+ if c_string_encoding == 'default':
+ code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 1')
+ else:
+ code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT '
+ '(PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)')
+ code.putln('#define __PYX_DEFAULT_STRING_ENCODING "%s"' % c_string_encoding)
+ if c_string_type == 'bytearray':
+ c_string_func_name = 'ByteArray'
+ else:
+ c_string_func_name = c_string_type.title()
+ code.putln('#define __Pyx_PyObject_FromString __Pyx_Py%s_FromString' % c_string_func_name)
+ code.putln('#define __Pyx_PyObject_FromStringAndSize __Pyx_Py%s_FromStringAndSize' % c_string_func_name)
+ code.put(UtilityCode.load_as_string("TypeConversions", "TypeConversion.c")[0])
+
+ # These utility functions are assumed to exist and are used elsewhere.
+ PyrexTypes.c_long_type.create_to_py_utility_code(env)
+ PyrexTypes.c_long_type.create_from_py_utility_code(env)
+ PyrexTypes.c_int_type.create_from_py_utility_code(env)
+
+ code.put(Nodes.branch_prediction_macros)
+ code.putln('static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }')
+ code.putln('')
+ code.putln('static PyObject *%s = NULL;' % env.module_cname)
+ code.putln('static PyObject *%s;' % env.module_dict_cname)
+ code.putln('static PyObject *%s;' % Naming.builtins_cname)
+ code.putln('static PyObject *%s = NULL;' % Naming.cython_runtime_cname)
+ code.putln('static PyObject *%s;' % Naming.empty_tuple)
+ code.putln('static PyObject *%s;' % Naming.empty_bytes)
+ code.putln('static PyObject *%s;' % Naming.empty_unicode)
+ if Options.pre_import is not None:
+ code.putln('static PyObject *%s;' % Naming.preimport_cname)
+ code.putln('static int %s;' % Naming.lineno_cname)
+ code.putln('static int %s = 0;' % Naming.clineno_cname)
+ code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
+ code.putln('static const char *%s;' % Naming.filename_cname)
+
+ env.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ if has_np_pythran(env):
+ env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
+
+ def generate_extern_c_macro_definition(self, code):
+ name = Naming.extern_c_macro
+ code.putln("#ifndef %s" % name)
+ code.putln(" #ifdef __cplusplus")
+ code.putln(' #define %s extern "C"' % name)
+ code.putln(" #else")
+ code.putln(" #define %s extern" % name)
+ code.putln(" #endif")
+ code.putln("#endif")
+
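+ # Illustrative note (editor's addition, not part of the original source):
+ # Assuming Naming.extern_c_macro is the usual __PYX_EXTERN_C, the method
+ # above emits:
+ #
+ #     #ifndef __PYX_EXTERN_C
+ #       #ifdef __cplusplus
+ #         #define __PYX_EXTERN_C extern "C"
+ #       #else
+ #         #define __PYX_EXTERN_C extern
+ #       #endif
+ #     #endif
+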
+ def generate_dl_import_macro(self, code):
+ code.putln("#ifndef DL_IMPORT")
+ code.putln(" #define DL_IMPORT(_T) _T")
+ code.putln("#endif")
+
+ def generate_includes(self, env, cimported_modules, code, early=True, late=True):
+ includes = []
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.EARLY:
+ if early:
+ inc.write(code)
+ elif inc.location == inc.LATE:
+ if late:
+ inc.write(code)
+ if early:
+ code.putln_openmp("#include <omp.h>")
+
+ def generate_filename_table(self, code):
+ from os.path import isabs, basename
+ code.putln("")
+ code.putln("static const char *%s[] = {" % Naming.filetable_cname)
+ if code.globalstate.filename_list:
+ for source_desc in code.globalstate.filename_list:
+ file_path = source_desc.get_filenametable_entry()
+ if Options.source_root:
+ # If a source root is specified, use the description instead - it is the source-root-relative filename
+ file_path = source_desc.get_description()
+ if isabs(file_path):
+ file_path = basename(file_path) # never include absolute paths
+ escaped_filename = file_path.replace("\\", "\\\\").replace('"', r'\"')
+ code.putln('"%s",' % escaped_filename)
+ else:
+ # Some C compilers don't like an empty array
+ code.putln("0")
+ code.putln("};")
+
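+ # Illustrative note (editor's addition, not part of the original source):
+ # For a module compiled from a hypothetical pkg/mod.pyx that includes one
+ # .pxi file, and assuming the usual __pyx_f cname for the file table,
+ # generate_filename_table() emits something like:
+ #
+ #     static const char *__pyx_f[] = {
+ #     "pkg/mod.pyx",
+ #     "pkg/defs.pxi",
+ #     };
+ #
+ # The __PYX_MARK_ERR_POS() macro indexes this table by f_index when
+ # recording error positions.
+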
+ def generate_type_predeclarations(self, env, code):
+ pass
+
+ def generate_type_header_code(self, type_entries, code):
+ # Generate definitions of structs/unions/enums/typedefs/objstructs.
+ #self.generate_gcc33_hack(env, code) # Is this still needed?
+ # Forward declarations
+ for entry in type_entries:
+ if not entry.in_cinclude:
+ #print "generate_type_header_code:", entry.name, repr(entry.type) ###
+ type = entry.type
+ if type.is_typedef: # Must test this first!
+ pass
+ elif type.is_struct_or_union or type.is_cpp_class:
+ self.generate_struct_union_predeclaration(entry, code)
+ elif type.is_ctuple and entry.used:
+ self.generate_struct_union_predeclaration(entry.type.struct_entry, code)
+ elif type.is_extension_type:
+ self.generate_objstruct_predeclaration(type, code)
+ # Actual declarations
+ for entry in type_entries:
+ if not entry.in_cinclude:
+ #print "generate_type_header_code:", entry.name, repr(entry.type) ###
+ type = entry.type
+ if type.is_typedef: # Must test this first!
+ self.generate_typedef(entry, code)
+ elif type.is_enum:
+ self.generate_enum_definition(entry, code)
+ elif type.is_struct_or_union:
+ self.generate_struct_union_definition(entry, code)
+ elif type.is_ctuple and entry.used:
+ self.generate_struct_union_definition(entry.type.struct_entry, code)
+ elif type.is_cpp_class:
+ self.generate_cpp_class_definition(entry, code)
+ elif type.is_extension_type:
+ self.generate_objstruct_definition(type, code)
+
+ def generate_gcc33_hack(self, env, code):
+ # Workaround for spurious warning generation in gcc 3.3
+ code.putln("")
+ for entry in env.c_class_entries:
+ type = entry.type
+ if not type.typedef_flag:
+ name = type.objstruct_cname
+ if name.startswith("__pyx_"):
+ tail = name[6:]
+ else:
+ tail = name
+ code.putln("typedef struct %s __pyx_gcc33_%s;" % (
+ name, tail))
+
+ def generate_typedef(self, entry, code):
+ base_type = entry.type.typedef_base_type
+ if base_type.is_numeric:
+ try:
+ writer = code.globalstate['numeric_typedefs']
+ except KeyError:
+ writer = code
+ else:
+ writer = code
+ writer.mark_pos(entry.pos)
+ writer.putln("typedef %s;" % base_type.declaration_code(entry.cname))
+
+ def sue_predeclaration(self, type, kind, name):
+ if type.typedef_flag:
+ return "%s %s;\ntypedef %s %s %s;" % (
+ kind, name,
+ kind, name, name)
+ else:
+ return "%s %s;" % (kind, name)
+
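+ # Illustrative note (editor's addition, not part of the original source):
+ # For a hypothetical struct cname __pyx_t_foo, sue_predeclaration()
+ # returns either
+ #
+ #     struct __pyx_t_foo;
+ #
+ # or, when the type was declared with a typedef:
+ #
+ #     struct __pyx_t_foo;
+ #     typedef struct __pyx_t_foo __pyx_t_foo;
+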
+ def generate_struct_union_predeclaration(self, entry, code):
+ type = entry.type
+ if type.is_cpp_class and type.templates:
+ code.putln("template <typename %s>" % ", typename ".join(
+ [T.empty_declaration_code() for T in type.templates]))
+ code.putln(self.sue_predeclaration(type, type.kind, type.cname))
+
+ def sue_header_footer(self, type, kind, name):
+ header = "%s %s {" % (kind, name)
+ footer = "};"
+ return header, footer
+
+ def generate_struct_union_definition(self, entry, code):
+ code.mark_pos(entry.pos)
+ type = entry.type
+ scope = type.scope
+ if scope:
+ kind = type.kind
+ packed = type.is_struct and type.packed
+ if packed:
+ kind = "%s %s" % (type.kind, "__Pyx_PACKED")
+ code.globalstate.use_utility_code(packed_struct_utility_code)
+ header, footer = \
+ self.sue_header_footer(type, kind, type.cname)
+ if packed:
+ code.putln("#if defined(__SUNPRO_C)")
+ code.putln(" #pragma pack(1)")
+ code.putln("#elif !defined(__GNUC__)")
+ code.putln(" #pragma pack(push, 1)")
+ code.putln("#endif")
+ code.putln(header)
+ var_entries = scope.var_entries
+ if not var_entries:
+ error(entry.pos, "Empty struct or union definition not allowed outside a 'cdef extern from' block")
+ for attr in var_entries:
+ code.putln(
+ "%s;" % attr.type.declaration_code(attr.cname))
+ code.putln(footer)
+ if packed:
+ code.putln("#if defined(__SUNPRO_C)")
+ code.putln(" #pragma pack()")
+ code.putln("#elif !defined(__GNUC__)")
+ code.putln(" #pragma pack(pop)")
+ code.putln("#endif")
+
+ def generate_cpp_class_definition(self, entry, code):
+ code.mark_pos(entry.pos)
+ type = entry.type
+ scope = type.scope
+ if scope:
+ if type.templates:
+ code.putln("template <class %s>" % ", class ".join(
+ [T.empty_declaration_code() for T in type.templates]))
+ # Just let everything be public.
+ code.put("struct %s" % type.cname)
+ if type.base_classes:
+ base_class_decl = ", public ".join(
+ [base_class.empty_declaration_code() for base_class in type.base_classes])
+ code.put(" : public %s" % base_class_decl)
+ code.putln(" {")
+ py_attrs = [e for e in scope.entries.values()
+ if e.type.is_pyobject and not e.is_inherited]
+ has_virtual_methods = False
+ constructor = None
+ destructor = None
+ for attr in scope.var_entries:
+ if attr.type.is_cfunction:
+ code.put("inline ")
+ if attr.type.is_cfunction and attr.type.is_static_method:
+ code.put("static ")
+ elif attr.name == "<init>":
+ constructor = attr
+ elif attr.name == "<del>":
+ destructor = attr
+ elif attr.type.is_cfunction:
+ code.put("virtual ")
+ has_virtual_methods = True
+ code.putln("%s;" % attr.type.declaration_code(attr.cname))
+ is_implementing = 'init_module' in code.globalstate.parts
+ if constructor or py_attrs:
+ if constructor:
+ arg_decls = []
+ arg_names = []
+ for arg in constructor.type.original_args[
+ :len(constructor.type.args)-constructor.type.optional_arg_count]:
+ arg_decls.append(arg.declaration_code())
+ arg_names.append(arg.cname)
+ if constructor.type.optional_arg_count:
+ arg_decls.append(constructor.type.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ arg_names.append(Naming.optional_args_cname)
+ if not arg_decls:
+ arg_decls = ["void"]
+ else:
+ arg_decls = ["void"]
+ arg_names = []
+ if is_implementing:
+ code.putln("%s(%s) {" % (type.cname, ", ".join(arg_decls)))
+ if py_attrs:
+ code.put_ensure_gil()
+ for attr in py_attrs:
+ code.put_init_var_to_py_none(attr, nanny=False)
+ if constructor:
+ code.putln("%s(%s);" % (constructor.cname, ", ".join(arg_names)))
+ if py_attrs:
+ code.put_release_ensured_gil()
+ code.putln("}")
+ else:
+ code.putln("%s(%s);" % (type.cname, ", ".join(arg_decls)))
+ if destructor or py_attrs or has_virtual_methods:
+ if has_virtual_methods:
+ code.put("virtual ")
+ if is_implementing:
+ code.putln("~%s() {" % type.cname)
+ if py_attrs:
+ code.put_ensure_gil()
+ if destructor:
+ code.putln("%s();" % destructor.cname)
+ if py_attrs:
+ for attr in py_attrs:
+ code.put_var_xdecref(attr, nanny=False)
+ code.put_release_ensured_gil()
+ code.putln("}")
+ else:
+ code.putln("~%s();" % type.cname)
+ if py_attrs:
+ # Also need copy constructor and assignment operators.
+ if is_implementing:
+ code.putln("%s(const %s& __Pyx_other) {" % (type.cname, type.cname))
+ code.put_ensure_gil()
+ for attr in scope.var_entries:
+ if not attr.type.is_cfunction:
+ code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
+ code.put_var_incref(attr, nanny=False)
+ code.put_release_ensured_gil()
+ code.putln("}")
+ code.putln("%s& operator=(const %s& __Pyx_other) {" % (type.cname, type.cname))
+ code.putln("if (this != &__Pyx_other) {")
+ code.put_ensure_gil()
+ for attr in scope.var_entries:
+ if not attr.type.is_cfunction:
+ code.put_var_xdecref(attr, nanny=False)
+ code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
+ code.put_var_incref(attr, nanny=False)
+ code.put_release_ensured_gil()
+ code.putln("}")
+ code.putln("return *this;")
+ code.putln("}")
+ else:
+ code.putln("%s(const %s& __Pyx_other);" % (type.cname, type.cname))
+ code.putln("%s& operator=(const %s& __Pyx_other);" % (type.cname, type.cname))
+ code.putln("};")
+
+ def generate_enum_definition(self, entry, code):
+ code.mark_pos(entry.pos)
+ type = entry.type
+ name = entry.cname or entry.name or ""
+ header, footer = self.sue_header_footer(type, "enum", name)
+ code.putln(header)
+ enum_values = entry.enum_values
+ if not enum_values:
+ error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block")
+ else:
+ last_entry = enum_values[-1]
+ # this does not really generate code, just builds the result value
+ for value_entry in enum_values:
+ if value_entry.value_node is not None:
+ value_entry.value_node.generate_evaluation_code(code)
+
+ for value_entry in enum_values:
+ if value_entry.value_node is None:
+ value_code = value_entry.cname
+ else:
+ value_code = ("%s = %s" % (
+ value_entry.cname,
+ value_entry.value_node.result()))
+ if value_entry is not last_entry:
+ value_code += ","
+ code.putln(value_code)
+ code.putln(footer)
+ if entry.type.typedef_flag:
+ # Not pre-declared.
+ code.putln("typedef enum %s %s;" % (name, name))
+
+ def generate_typeobj_predeclaration(self, entry, code):
+ code.putln("")
+ name = entry.type.typeobj_cname
+ if name:
+ if entry.visibility == 'extern' and not entry.in_cinclude:
+ code.putln("%s %s %s;" % (
+ Naming.extern_c_macro,
+ PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
+ name))
+ elif entry.visibility == 'public':
+ code.putln("%s %s %s;" % (
+ Naming.extern_c_macro,
+ PyrexTypes.public_decl("PyTypeObject", "DL_EXPORT"),
+ name))
+ # ??? Do we really need the rest of this? ???
+ #else:
+ # code.putln("static PyTypeObject %s;" % name)
+
+ def generate_exttype_vtable_struct(self, entry, code):
+ if not entry.used:
+ return
+
+ code.mark_pos(entry.pos)
+ # Generate struct declaration for an extension type's vtable.
+ type = entry.type
+ scope = type.scope
+
+ self.specialize_fused_types(scope)
+
+ if type.vtabstruct_cname:
+ code.putln("")
+ code.putln("struct %s {" % type.vtabstruct_cname)
+ if type.base_type and type.base_type.vtabstruct_cname:
+ code.putln("struct %s %s;" % (
+ type.base_type.vtabstruct_cname,
+ Naming.obj_base_cname))
+ for method_entry in scope.cfunc_entries:
+ if not method_entry.is_inherited:
+ code.putln("%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname))
+ code.putln("};")
+
+ def generate_exttype_vtabptr_declaration(self, entry, code):
+ if not entry.used:
+ return
+
+ code.mark_pos(entry.pos)
+ # Generate declaration of pointer to an extension type's vtable.
+ type = entry.type
+ if type.vtabptr_cname:
+ code.putln("static struct %s *%s;" % (
+ type.vtabstruct_cname,
+ type.vtabptr_cname))
+
+ def generate_exttype_final_methods_declaration(self, entry, code):
+ if not entry.used:
+ return
+
+ code.mark_pos(entry.pos)
+ # Generate final methods prototypes
+ type = entry.type
+ for method_entry in entry.type.scope.cfunc_entries:
+ if not method_entry.is_inherited and method_entry.final_func_cname:
+ declaration = method_entry.type.declaration_code(
+ method_entry.final_func_cname)
+ modifiers = code.build_function_modifiers(method_entry.func_modifiers)
+ code.putln("static %s%s;" % (modifiers, declaration))
+
+ def generate_objstruct_predeclaration(self, type, code):
+ if not type.scope:
+ return
+ code.putln(self.sue_predeclaration(type, "struct", type.objstruct_cname))
+
+ def generate_objstruct_definition(self, type, code):
+ code.mark_pos(type.pos)
+ # Generate object struct definition for an
+ # extension type.
+ if not type.scope:
+ return # Forward declared but never defined
+ header, footer = self.sue_header_footer(
+ type, "struct", type.objstruct_cname)
+ code.putln(header)
+ base_type = type.base_type
+ if base_type:
+ basestruct_cname = base_type.objstruct_cname
+ if basestruct_cname == "PyTypeObject":
+ # User-defined subclasses of type are heap allocated.
+ basestruct_cname = "PyHeapTypeObject"
+ code.putln(
+ "%s%s %s;" % (
+ ("struct ", "")[base_type.typedef_flag],
+ basestruct_cname,
+ Naming.obj_base_cname))
+ else:
+ code.putln(
+ "PyObject_HEAD")
+ if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname):
+ code.putln(
+ "struct %s *%s;" % (
+ type.vtabstruct_cname,
+ type.vtabslot_cname))
+ for attr in type.scope.var_entries:
+ if attr.is_declared_generic:
+ attr_type = py_object_type
+ else:
+ attr_type = attr.type
+ code.putln(
+ "%s;" % attr_type.declaration_code(attr.cname))
+ code.putln(footer)
+ if type.objtypedef_cname is not None:
+ # Only for exposing public typedef name.
+ code.putln("typedef struct %s %s;" % (type.objstruct_cname, type.objtypedef_cname))
+
+ def generate_c_class_declarations(self, env, code, definition):
+ for entry in env.c_class_entries:
+ if definition or entry.defined_in_pxd:
+ code.putln("static PyTypeObject *%s = 0;" % (
+ entry.type.typeptr_cname))
+
+ def generate_cvariable_declarations(self, env, code, definition):
+ if env.is_cython_builtin:
+ return
+ for entry in env.var_entries:
+ if (entry.in_cinclude or entry.in_closure or
+ (entry.visibility == 'private' and not (entry.defined_in_pxd or entry.used))):
+ continue
+
+ storage_class = None
+ dll_linkage = None
+ init = None
+
+ if entry.visibility == 'extern':
+ storage_class = Naming.extern_c_macro
+ dll_linkage = "DL_IMPORT"
+ elif entry.visibility == 'public':
+ storage_class = Naming.extern_c_macro
+ if definition:
+ dll_linkage = "DL_EXPORT"
+ else:
+ dll_linkage = "DL_IMPORT"
+ elif entry.visibility == 'private':
+ storage_class = "static"
+ dll_linkage = None
+ if entry.init is not None:
+ init = entry.type.literal_code(entry.init)
+ type = entry.type
+ cname = entry.cname
+
+ if entry.defined_in_pxd and not definition:
+ storage_class = "static"
+ dll_linkage = None
+ type = CPtrType(type)
+ cname = env.mangle(Naming.varptr_prefix, entry.name)
+ init = 0
+
+ if storage_class:
+ code.put("%s " % storage_class)
+ code.put(type.declaration_code(
+ cname, dll_linkage=dll_linkage))
+ if init is not None:
+ code.put_safe(" = %s" % init)
+ code.putln(";")
+ if entry.cname != cname:
+ code.putln("#define %s (*%s)" % (entry.cname, cname))
+
+ def generate_cfunction_declarations(self, env, code, definition):
+ for entry in env.cfunc_entries:
+ if entry.used or entry.visibility == 'public' or entry.api:
+ generate_cfunction_declaration(entry, env, code, definition)
+
+ def generate_variable_definitions(self, env, code):
+ for entry in env.var_entries:
+ if not entry.in_cinclude and entry.visibility == "public":
+ code.put(entry.type.declaration_code(entry.cname))
+ if entry.init is not None:
+ init = entry.type.literal_code(entry.init)
+ code.put_safe(" = %s" % init)
+ code.putln(";")
+
+ def generate_typeobj_definitions(self, env, code):
+ full_module_name = env.qualified_name
+ for entry in env.c_class_entries:
+ #print "generate_typeobj_definitions:", entry.name
+ #print "...visibility =", entry.visibility
+ if entry.visibility != 'extern':
+ type = entry.type
+ scope = type.scope
+ if scope: # could be None if there was an error
+ if not scope.directives['c_api_binop_methods']:
+ error(self.pos,
+ "The 'c_api_binop_methods' directive is only supported for forward compatibility"
+ " and must be True.")
+ self.generate_exttype_vtable(scope, code)
+ self.generate_new_function(scope, code, entry)
+ self.generate_dealloc_function(scope, code)
+ if scope.needs_gc():
+ self.generate_traverse_function(scope, code, entry)
+ if scope.needs_tp_clear():
+ self.generate_clear_function(scope, code, entry)
+ if scope.defines_any_special(["__getitem__"]):
+ self.generate_getitem_int_function(scope, code)
+ if scope.defines_any_special(["__setitem__", "__delitem__"]):
+ self.generate_ass_subscript_function(scope, code)
+ if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]):
+ warning(self.pos,
+ "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, "
+ "use __getitem__, __setitem__, and __delitem__ instead", 1)
+ code.putln("#if PY_MAJOR_VERSION >= 3")
+ code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.")
+ code.putln("#endif")
+ if scope.defines_any_special(["__setslice__", "__delslice__"]):
+ self.generate_ass_slice_function(scope, code)
+ if scope.defines_any_special(["__getattr__", "__getattribute__"]):
+ self.generate_getattro_function(scope, code)
+ if scope.defines_any_special(["__setattr__", "__delattr__"]):
+ self.generate_setattro_function(scope, code)
+ if scope.defines_any_special(["__get__"]):
+ self.generate_descr_get_function(scope, code)
+ if scope.defines_any_special(["__set__", "__delete__"]):
+ self.generate_descr_set_function(scope, code)
+ if not scope.is_closure_class_scope and scope.defines_any(["__dict__"]):
+ self.generate_dict_getter_function(scope, code)
+ if scope.defines_any_special(TypeSlots.richcmp_special_methods):
+ self.generate_richcmp_function(scope, code)
+ self.generate_property_accessors(scope, code)
+ self.generate_method_table(scope, code)
+ self.generate_getset_table(scope, code)
+ self.generate_typeobj_definition(full_module_name, entry, code)
+
+ def generate_exttype_vtable(self, scope, code):
+ # Generate the definition of an extension type's vtable.
+ type = scope.parent_type
+ if type.vtable_cname:
+ code.putln("static struct %s %s;" % (
+ type.vtabstruct_cname,
+ type.vtable_cname))
+
+ def generate_self_cast(self, scope, code):
+ type = scope.parent_type
+ code.putln(
+ "%s = (%s)o;" % (
+ type.declaration_code("p"),
+ type.empty_declaration_code()))
+
+ def generate_new_function(self, scope, code, cclass_entry):
+ tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
+ slot_func = scope.mangle_internal("tp_new")
+ type = scope.parent_type
+ base_type = type.base_type
+
+ have_entries, (py_attrs, py_buffers, memoryview_slices) = \
+ scope.get_refcounted_entries()
+ is_final_type = scope.parent_type.is_final_type
+ if scope.is_internal:
+ # internal classes (should) never need None inits; normal zeroing will do
+ py_attrs = []
+ cpp_class_attrs = [entry for entry in scope.var_entries
+ if entry.type.is_cpp_class]
+
+ new_func_entry = scope.lookup_here("__new__")
+ if base_type or (new_func_entry and new_func_entry.is_special
+ and not new_func_entry.trivial_signature):
+ unused_marker = ''
+ else:
+ unused_marker = 'CYTHON_UNUSED '
+
+ if base_type:
+ freelist_size = 0 # not currently supported
+ else:
+ freelist_size = scope.directives.get('freelist', 0)
+ freelist_name = scope.mangle_internal(Naming.freelist_name)
+ freecount_name = scope.mangle_internal(Naming.freecount_name)
+
+ decls = code.globalstate['decls']
+ decls.putln("static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/" %
+ slot_func)
+ code.putln("")
+ if freelist_size:
+ code.putln("static %s[%d];" % (
+ scope.parent_type.declaration_code(freelist_name),
+ freelist_size))
+ code.putln("static int %s = 0;" % freecount_name)
+ code.putln("")
+ code.putln(
+ "static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {" % (
+ slot_func, unused_marker, unused_marker))
+
+ need_self_cast = (type.vtabslot_cname or
+ (py_buffers or memoryview_slices or py_attrs) or
+ cpp_class_attrs)
+ if need_self_cast:
+ code.putln("%s;" % scope.parent_type.declaration_code("p"))
+ if base_type:
+ tp_new = TypeSlots.get_base_slot_function(scope, tp_slot)
+ if tp_new is None:
+ tp_new = "%s->tp_new" % base_type.typeptr_cname
+ code.putln("PyObject *o = %s(t, a, k);" % tp_new)
+ else:
+ code.putln("PyObject *o;")
+ if freelist_size:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ if is_final_type:
+ type_safety_check = ''
+ else:
+ type_safety_check = ' & (int)((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)'
+ obj_struct = type.declaration_code("", deref=True)
+ code.putln(
+ "if (CYTHON_COMPILING_IN_CPYTHON & likely((int)(%s > 0) & (int)(t->tp_basicsize == sizeof(%s))%s)) {" % (
+ freecount_name, obj_struct, type_safety_check))
+ code.putln("o = (PyObject*)%s[--%s];" % (
+ freelist_name, freecount_name))
+ code.putln("memset(o, 0, sizeof(%s));" % obj_struct)
+ code.putln("(void) PyObject_INIT(o, t);")
+ if scope.needs_gc():
+ code.putln("PyObject_GC_Track(o);")
+ code.putln("} else {")
+ if not is_final_type:
+ code.putln("if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {")
+ code.putln("o = (*t->tp_alloc)(t, 0);")
+ if not is_final_type:
+ code.putln("} else {")
+ code.putln("o = (PyObject *) PyBaseObject_Type.tp_new(t, %s, 0);" % Naming.empty_tuple)
+ code.putln("}")
+ code.putln("if (unlikely(!o)) return 0;")
+ if freelist_size and not base_type:
+ code.putln('}')
+ if need_self_cast:
+ code.putln("p = %s;" % type.cast_code("o"))
+ #if need_self_cast:
+ # self.generate_self_cast(scope, code)
+
+ # from this point on, ensure DECREF(o) on failure
+ needs_error_cleanup = False
+
+ if type.vtabslot_cname:
+ vtab_base_type = type
+ while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname:
+ vtab_base_type = vtab_base_type.base_type
+ if vtab_base_type is not type:
+ struct_type_cast = "(struct %s*)" % vtab_base_type.vtabstruct_cname
+ else:
+ struct_type_cast = ""
+ code.putln("p->%s = %s%s;" % (
+ type.vtabslot_cname,
+ struct_type_cast, type.vtabptr_cname))
+
+ for entry in cpp_class_attrs:
+ code.putln("new((void*)&(p->%s)) %s();" % (
+ entry.cname, entry.type.empty_declaration_code()))
+
+ for entry in py_attrs:
+ if entry.name == "__dict__":
+ needs_error_cleanup = True
+ code.put("p->%s = PyDict_New(); if (unlikely(!p->%s)) goto bad;" % (
+ entry.cname, entry.cname))
+ else:
+ code.put_init_var_to_py_none(entry, "p->%s", nanny=False)
+
+ for entry in memoryview_slices:
+ code.putln("p->%s.data = NULL;" % entry.cname)
+ code.putln("p->%s.memview = NULL;" % entry.cname)
+
+ for entry in py_buffers:
+ code.putln("p->%s.obj = NULL;" % entry.cname)
+
+ if cclass_entry.cname == '__pyx_memoryviewslice':
+ code.putln("p->from_slice.memview = NULL;")
+
+ if new_func_entry and new_func_entry.is_special:
+ if new_func_entry.trivial_signature:
+ cinit_args = "o, %s, NULL" % Naming.empty_tuple
+ else:
+ cinit_args = "o, a, k"
+ needs_error_cleanup = True
+ code.putln("if (unlikely(%s(%s) < 0)) goto bad;" % (
+ new_func_entry.func_cname, cinit_args))
+
+ code.putln(
+ "return o;")
+ if needs_error_cleanup:
+ code.putln("bad:")
+ code.put_decref_clear("o", py_object_type, nanny=False)
+ code.putln("return NULL;")
+ code.putln(
+ "}")
+
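+ # The freelist fast path above compiles to C of roughly this shape
+ # (a sketch; the guard macros and exact casts differ in the real output):
+ #
+ #     if (likely(freecount > 0 && t->tp_basicsize == sizeof(obj_struct))) {
+ #         o = (PyObject*)freelist[--freecount];   /* reuse a dead object */
+ #         memset(o, 0, sizeof(obj_struct));
+ #         (void) PyObject_INIT(o, t);             /* reset type and refcount */
+ #     } else {
+ #         o = (*t->tp_alloc)(t, 0);               /* normal allocation */
+ #     }
+ #
+ # The producer side lives in the dealloc function below, which pushes
+ # suitably-sized instances back onto the freelist instead of freeing them.
+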
+ def generate_dealloc_function(self, scope, code):
+ tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
+ slot_func = scope.mangle_internal("tp_dealloc")
+ base_type = scope.parent_type.base_type
+ if tp_slot.slot_code(scope) != slot_func:
+ return # never used
+
+ slot_func_cname = scope.mangle_internal("tp_dealloc")
+ code.putln("")
+ code.putln(
+ "static void %s(PyObject *o) {" % slot_func_cname)
+
+ is_final_type = scope.parent_type.is_final_type
+ needs_gc = scope.needs_gc()
+
+ weakref_slot = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_slot not in scope.var_entries:
+ weakref_slot = None
+
+ dict_slot = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
+ if dict_slot not in scope.var_entries:
+ dict_slot = None
+
+ _, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries()
+ cpp_class_attrs = [entry for entry in scope.var_entries
+ if entry.type.is_cpp_class]
+
+ if py_attrs or cpp_class_attrs or memoryview_slices or weakref_slot or dict_slot:
+ self.generate_self_cast(scope, code)
+
+ if not is_final_type:
+ # in Py3.4+, call tp_finalize() as early as possible
+ code.putln("#if CYTHON_USE_TP_FINALIZE")
+ if needs_gc:
+ finalised_check = '!_PyGC_FINALIZED(o)'
+ else:
+ finalised_check = (
+ '(!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))')
+ code.putln(
+ "if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)"
+ " && Py_TYPE(o)->tp_finalize) && %s) {" % finalised_check)
+ # if the instance was resurrected by the finaliser, return
+ code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;")
+ code.putln("}")
+ code.putln("#endif")
+
+ if needs_gc:
+ # We must mark this object as (GC) untracked while tearing
+ # it down, lest garbage collection be invoked while this
+ # destructor is running.
+ code.putln("PyObject_GC_UnTrack(o);")
+
+ # call the user's __dealloc__
+ self.generate_usr_dealloc_call(scope, code)
+
+ if weakref_slot:
+ code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);")
+
+ if dict_slot:
+ code.putln("if (p->__dict__) PyDict_Clear(p->__dict__);")
+
+ for entry in cpp_class_attrs:
+ code.putln("__Pyx_call_destructor(p->%s);" % entry.cname)
+
+ for entry in py_attrs:
+ code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False,
+ clear_before_decref=True)
+
+ for entry in memoryview_slices:
+ code.put_xdecref_memoryviewslice("p->%s" % entry.cname,
+ have_gil=True)
+
+ if base_type:
+ if needs_gc:
+ # The base class deallocator probably expects this to be tracked,
+ # so undo the untracking above.
+ if base_type.scope and base_type.scope.needs_gc():
+ code.putln("PyObject_GC_Track(o);")
+ else:
+ code.putln("#if CYTHON_USE_TYPE_SLOTS")
+ code.putln("if (PyType_IS_GC(Py_TYPE(o)->tp_base))")
+ code.putln("#endif")
+ code.putln("PyObject_GC_Track(o);")
+
+ tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot)
+ if tp_dealloc is not None:
+ code.putln("%s(o);" % tp_dealloc)
+ elif base_type.is_builtin_type:
+ code.putln("%s->tp_dealloc(o);" % base_type.typeptr_cname)
+ else:
+ # This is an externally defined type. Calling through the
+ # cimported base type pointer directly interacts badly with
+ # the module cleanup, which may already have cleared it.
+ # In that case, fall back to traversing the type hierarchy.
+ base_cname = base_type.typeptr_cname
+ code.putln("if (likely(%s)) %s->tp_dealloc(o); "
+ "else __Pyx_call_next_tp_dealloc(o, %s);" % (
+ base_cname, base_cname, slot_func_cname))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c"))
+ else:
+ freelist_size = scope.directives.get('freelist', 0)
+ if freelist_size:
+ freelist_name = scope.mangle_internal(Naming.freelist_name)
+ freecount_name = scope.mangle_internal(Naming.freecount_name)
+
+ if is_final_type:
+ type_safety_check = ''
+ else:
+ type_safety_check = (
+ ' & (int)((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)')
+
+ type = scope.parent_type
+ code.putln(
+ "if (CYTHON_COMPILING_IN_CPYTHON & ((int)(%s < %d) & (int)(Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % (
+ freecount_name,
+ freelist_size,
+ type.declaration_code("", deref=True),
+ type_safety_check))
+ code.putln("%s[%s++] = %s;" % (
+ freelist_name, freecount_name, type.cast_code("o")))
+ code.putln("} else {")
+ code.putln("(*Py_TYPE(o)->tp_free)(o);")
+ if freelist_size:
+ code.putln("}")
+ code.putln(
+ "}")
+
+ def generate_usr_dealloc_call(self, scope, code):
+ entry = scope.lookup_here("__dealloc__")
+ if not entry:
+ return
+
+ code.putln("{")
+ code.putln("PyObject *etype, *eval, *etb;")
+ code.putln("PyErr_Fetch(&etype, &eval, &etb);")
+ # increase the refcount while we are calling into user code
+ # to prevent recursive deallocation
+ code.putln("__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);")
+ code.putln("%s(o);" % entry.func_cname)
+ code.putln("__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);")
+ code.putln("PyErr_Restore(etype, eval, etb);")
+ code.putln("}")
+
+ def generate_traverse_function(self, scope, code, cclass_entry):
+ tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
+ slot_func = scope.mangle_internal("tp_traverse")
+ base_type = scope.parent_type.base_type
+ if tp_slot.slot_code(scope) != slot_func:
+ return # never used
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, visitproc v, void *a) {" % slot_func)
+
+ have_entries, (py_attrs, py_buffers, memoryview_slices) = (
+ scope.get_refcounted_entries(include_gc_simple=False))
+
+ if base_type or py_attrs:
+ code.putln("int e;")
+
+ if py_attrs or py_buffers:
+ self.generate_self_cast(scope, code)
+
+ if base_type:
+ # We want to call it explicitly if possible so that inlining can be performed.
+ static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
+ if static_call:
+ code.putln("e = %s(o, v, a); if (e) return e;" % static_call)
+ elif base_type.is_builtin_type:
+ base_cname = base_type.typeptr_cname
+ code.putln("if (!%s->tp_traverse); else { e = %s->tp_traverse(o,v,a); if (e) return e; }" % (
+ base_cname, base_cname))
+ else:
+ # This is an externally defined type. Calling through the
+ # cimported base type pointer directly interacts badly with
+ # the module cleanup, which may already have cleared it.
+ # In that case, fall back to traversing the type hierarchy.
+ base_cname = base_type.typeptr_cname
+ code.putln(
+ "e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : "
+ "__Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % (
+ base_cname, base_cname, base_cname, slot_func))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c"))
+
+ for entry in py_attrs:
+ var_code = "p->%s" % entry.cname
+ var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code)
+ code.putln("if (%s) {" % var_code)
+ code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject)
+ code.putln("}")
+
+ # Traverse buffer exporting objects.
+ # Note: we do not traverse the memoryview attributes of memoryview
+ # slices! When triggered by the GC, that would cause multiple visits
+ # (gc_refs subtractions that are not matched by the reference count!).
+ for entry in py_buffers:
+ cname = entry.cname + ".obj"
+ code.putln("if (p->%s) {" % cname)
+ code.putln("e = (*v)(p->%s, a); if (e) return e;" % cname)
+ code.putln("}")
+
+ code.putln("return 0;")
+ code.putln("}")
+
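+ # For illustration, a generated tp_traverse has roughly this shape
+ # (a sketch, assuming a single Python attribute `attr` and no base type):
+ #
+ #     static int __pyx_tp_traverse_X(PyObject *o, visitproc v, void *a) {
+ #         int e;
+ #         struct __pyx_obj_X *p = (struct __pyx_obj_X *)o;
+ #         if (p->attr) { e = (*v)(p->attr, a); if (e) return e; }
+ #         return 0;
+ #     }
+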
+ def generate_clear_function(self, scope, code, cclass_entry):
+ tp_slot = TypeSlots.get_slot_by_name("tp_clear")
+ slot_func = scope.mangle_internal("tp_clear")
+ base_type = scope.parent_type.base_type
+ if tp_slot.slot_code(scope) != slot_func:
+ return # never used
+
+ have_entries, (py_attrs, py_buffers, memoryview_slices) = (
+ scope.get_refcounted_entries(include_gc_simple=False))
+
+ if py_attrs or py_buffers or base_type:
+ unused = ''
+ else:
+ unused = 'CYTHON_UNUSED '
+
+ code.putln("")
+ code.putln("static int %s(%sPyObject *o) {" % (slot_func, unused))
+
+ if py_attrs and Options.clear_to_none:
+ code.putln("PyObject* tmp;")
+
+ if py_attrs or py_buffers:
+ self.generate_self_cast(scope, code)
+
+ if base_type:
+ # We want to call it explicitly if possible so that inlining can be performed.
+ static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
+ if static_call:
+ code.putln("%s(o);" % static_call)
+ elif base_type.is_builtin_type:
+ base_cname = base_type.typeptr_cname
+ code.putln("if (!%s->tp_clear); else %s->tp_clear(o);" % (
+ base_cname, base_cname))
+ else:
+ # This is an externally defined type. Calling through the
+ # cimported base type pointer directly interacts badly with
+ # the module cleanup, which may already have cleared it.
+ # In that case, fall back to traversing the type hierarchy.
+ base_cname = base_type.typeptr_cname
+ code.putln(
+ "if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % (
+ base_cname, base_cname, base_cname, slot_func))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c"))
+
+ if Options.clear_to_none:
+ for entry in py_attrs:
+ name = "p->%s" % entry.cname
+ code.putln("tmp = ((PyObject*)%s);" % name)
+ if entry.is_declared_generic:
+ code.put_init_to_py_none(name, py_object_type, nanny=False)
+ else:
+ code.put_init_to_py_none(name, entry.type, nanny=False)
+ code.putln("Py_XDECREF(tmp);")
+ else:
+ for entry in py_attrs:
+ code.putln("Py_CLEAR(p->%s);" % entry.cname)
+
+ for entry in py_buffers:
+ # Note: shouldn't this call __Pyx_ReleaseBuffer ??
+ code.putln("Py_CLEAR(p->%s.obj);" % entry.cname)
+
+ if cclass_entry.cname == '__pyx_memoryviewslice':
+ code.putln("__PYX_XDEC_MEMVIEW(&p->from_slice, 1);")
+
+ code.putln("return 0;")
+ code.putln("}")
+
+ def generate_getitem_int_function(self, scope, code):
+ # This function is put into the sq_item slot when
+ # a __getitem__ method is present. It converts its
+ # argument to a Python integer and calls mp_subscript.
+ code.putln(
+ "static PyObject *%s(PyObject *o, Py_ssize_t i) {" % (
+ scope.mangle_internal("sq_item")))
+ code.putln(
+ "PyObject *r;")
+ code.putln(
+ "PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;")
+ code.putln(
+ "r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);")
+ code.putln(
+ "Py_DECREF(x);")
+ code.putln(
+ "return r;")
+ code.putln(
+ "}")
+
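+ # Semantically, the generated sq_item wrapper is equivalent to this
+ # Python sketch (illustrative only):
+ #
+ #     def sq_item(o, i):        # i arrives as a C Py_ssize_t
+ #         return o[int(i)]      # box the index, delegate to mp_subscript
+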
+ def generate_ass_subscript_function(self, scope, code):
+ # Setting and deleting an item are both done through
+ # the ass_subscript method, so we dispatch to the user's __setitem__
+ # or __delitem__, or raise an exception.
+ base_type = scope.parent_type.base_type
+ set_entry = scope.lookup_here("__setitem__")
+ del_entry = scope.lookup_here("__delitem__")
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % (
+ scope.mangle_internal("mp_ass_subscript")))
+ code.putln(
+ "if (v) {")
+ if set_entry:
+ code.putln("return %s(o, i, v);" % set_entry.func_cname)
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
+ code.putln(
+ "PyErr_Format(PyExc_NotImplementedError,")
+ code.putln(
+ ' "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "else {")
+ if del_entry:
+ code.putln(
+ "return %s(o, i);" % (
+ del_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
+ code.putln(
+ "PyErr_Format(PyExc_NotImplementedError,")
+ code.putln(
+ ' "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "}")
+
+ def generate_guarded_basetype_call(
+ self, base_type, substructure, slot, args, code):
+ if base_type:
+ base_tpname = base_type.typeptr_cname
+ if substructure:
+ code.putln(
+ "if (%s->%s && %s->%s->%s)" % (
+ base_tpname, substructure, base_tpname, substructure, slot))
+ code.putln(
+ " return %s->%s->%s(%s);" % (
+ base_tpname, substructure, slot, args))
+ else:
+ code.putln(
+ "if (%s->%s)" % (
+ base_tpname, slot))
+ code.putln(
+ " return %s->%s(%s);" % (
+ base_tpname, slot, args))
+
+ def generate_ass_slice_function(self, scope, code):
+ # Setting and deleting a slice are both done through
+ # the ass_slice method, so we dispatch to the user's __setslice__
+ # or __delslice__, or raise an exception.
+ base_type = scope.parent_type.base_type
+ set_entry = scope.lookup_here("__setslice__")
+ del_entry = scope.lookup_here("__delslice__")
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" % (
+ scope.mangle_internal("sq_ass_slice")))
+ code.putln(
+ "if (v) {")
+ if set_entry:
+ code.putln(
+ "return %s(o, i, j, v);" % (
+ set_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
+ code.putln(
+ "PyErr_Format(PyExc_NotImplementedError,")
+ code.putln(
+ ' "2-element slice assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "else {")
+ if del_entry:
+ code.putln(
+ "return %s(o, i, j);" % (
+ del_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
+ code.putln(
+ "PyErr_Format(PyExc_NotImplementedError,")
+ code.putln(
+ ' "2-element slice deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "}")
+
+ def generate_richcmp_function(self, scope, code):
+ if scope.lookup_here("__richcmp__"):
+ # user implemented, nothing to do
+ return
+ # otherwise, we have to generate it from the Python special methods
+ richcmp_cfunc = scope.mangle_internal("tp_richcompare")
+ code.putln("")
+ code.putln("static PyObject *%s(PyObject *o1, PyObject *o2, int op) {" % richcmp_cfunc)
+ code.putln("switch (op) {")
+
+ class_scopes = []
+ cls = scope.parent_type
+ while cls is not None and not cls.entry.visibility == 'extern':
+ class_scopes.append(cls.scope)
+ cls = cls.scope.parent_type.base_type
+ assert scope in class_scopes
+
+ extern_parent = None
+ if cls and cls.entry.visibility == 'extern':
+ # need to call up into base classes as we may not know all implemented comparison methods
+ extern_parent = cls if cls.typeptr_cname else scope.parent_type.base_type
+
+ eq_entry = None
+ has_ne = False
+ for cmp_method in TypeSlots.richcmp_special_methods:
+ for class_scope in class_scopes:
+ entry = class_scope.lookup_here(cmp_method)
+ if entry is not None:
+ break
+ else:
+ continue
+
+ cmp_type = cmp_method.strip('_').upper() # e.g. "__eq__" -> EQ
+ code.putln("case Py_%s: {" % cmp_type)
+ if cmp_method == '__eq__':
+ eq_entry = entry
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_True);")
+ elif cmp_method == '__ne__':
+ has_ne = True
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
+ code.putln("return %s(o1, o2);" % entry.func_cname)
+ code.putln("}")
+
+ if eq_entry and not has_ne and not extern_parent:
+ code.putln("case Py_NE: {")
+ code.putln("PyObject *ret;")
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
+ code.putln("ret = %s(o1, o2);" % eq_entry.func_cname)
+ code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
+ code.putln("int b = __Pyx_PyObject_IsTrue(ret); Py_DECREF(ret);")
+ code.putln("if (unlikely(b < 0)) return NULL;")
+ code.putln("ret = (b) ? Py_False : Py_True;")
+ code.putln("Py_INCREF(ret);")
+ code.putln("}")
+ code.putln("return ret;")
+ code.putln("}")
+
+ code.putln("default: {")
+ if extern_parent and extern_parent.typeptr_cname:
+ code.putln("if (likely(%s->tp_richcompare)) return %s->tp_richcompare(o1, o2, op);" % (
+ extern_parent.typeptr_cname, extern_parent.typeptr_cname))
+ code.putln("return __Pyx_NewRef(Py_NotImplemented);")
+ code.putln("}")
+
+ code.putln("}") # switch
+ code.putln("}")
+
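+ # When only __eq__ is implemented (and there is no extern parent), Py_NE
+ # is synthesised by negating it. Roughly, as a Python sketch of the
+ # emitted dispatch (illustrative only):
+ #
+ #     def tp_richcompare(o1, o2, op):
+ #         if op == Py_EQ:
+ #             return o1.__eq__(o2)
+ #         if op == Py_NE:                  # derived from __eq__
+ #             ret = o1.__eq__(o2)
+ #             if ret is NotImplemented:
+ #                 return ret
+ #             return not ret
+ #         return NotImplemented
+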
+ def generate_getattro_function(self, scope, code):
+ # First try to get the attribute using __getattribute__, if defined, or
+ # PyObject_GenericGetAttr.
+ #
+ # If that raises an AttributeError, call __getattr__ if it is defined.
+ #
+ # In both cases, the method may be defined in this class or in any base class.
+ def lookup_here_or_base(n, tp=None, extern_return=None):
+ # Recursive lookup
+ if tp is None:
+ tp = scope.parent_type
+ r = tp.scope.lookup_here(n)
+ if r is None:
+ if tp.is_external and extern_return is not None:
+ return extern_return
+ if tp.base_type is not None:
+ return lookup_here_or_base(n, tp.base_type)
+ return r
+
+ has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
+ getattr_entry = lookup_here_or_base("__getattr__")
+ getattribute_entry = lookup_here_or_base("__getattribute__")
+ code.putln("")
+ code.putln(
+ "static PyObject *%s(PyObject *o, PyObject *n) {" % (
+ scope.mangle_internal("tp_getattro")))
+ if getattribute_entry is not None:
+ code.putln(
+ "PyObject *v = %s(o, n);" % (
+ getattribute_entry.func_cname))
+ else:
+ if not has_instance_dict and scope.parent_type.is_final_type:
+ # Final with no dict => use faster type attribute lookup.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
+ elif not has_instance_dict or has_instance_dict == "extern":
+ # No dict in the known ancestors, but we don't know about extern ancestors or subtypes.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ else:
+ generic_getattr_cfunc = "PyObject_GenericGetAttr"
+ code.putln(
+ "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
+ if getattr_entry is not None:
+ code.putln(
+ "if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
+ code.putln(
+ "PyErr_Clear();")
+ code.putln(
+ "v = %s(o, n);" % (
+ getattr_entry.func_cname))
+ code.putln(
+ "}")
+ code.putln(
+ "return v;")
+ code.putln(
+ "}")
+
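+ # The lookup order implemented above corresponds to this Python sketch
+ # (illustrative only; `generic_getattr` and `user_getattr` stand in for
+ # the generated C helpers):
+ #
+ #     def tp_getattro(o, n):
+ #         try:
+ #             return generic_getattr(o, n)   # or the user's __getattribute__
+ #         except AttributeError:
+ #             if has_user_getattr:           # branch only generated if defined
+ #                 return user_getattr(o, n)
+ #             raise
+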
+ def generate_setattro_function(self, scope, code):
+ # Setting and deleting an attribute are both done through
+ # the setattro method, so we dispatch to the user's __setattr__
+ # or __delattr__, or fall back on PyObject_GenericSetAttr.
+ base_type = scope.parent_type.base_type
+ set_entry = scope.lookup_here("__setattr__")
+ del_entry = scope.lookup_here("__delattr__")
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, PyObject *n, PyObject *v) {" % (
+ scope.mangle_internal("tp_setattro")))
+ code.putln(
+ "if (v) {")
+ if set_entry:
+ code.putln(
+ "return %s(o, n, v);" % (
+ set_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, None, "tp_setattro", "o, n, v", code)
+ code.putln(
+ "return PyObject_GenericSetAttr(o, n, v);")
+ code.putln(
+ "}")
+ code.putln(
+ "else {")
+ if del_entry:
+ code.putln(
+ "return %s(o, n);" % (
+ del_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, None, "tp_setattro", "o, n, v", code)
+ code.putln(
+ "return PyObject_GenericSetAttr(o, n, 0);")
+ code.putln(
+ "}")
+ code.putln(
+ "}")
+
+ def generate_descr_get_function(self, scope, code):
+ # The __get__ function of a descriptor object can be
+ # called with NULL for the second or third argument
+ # under some circumstances, so we substitute None
+ # in those cases.
+ user_get_entry = scope.lookup_here("__get__")
+ code.putln("")
+ code.putln(
+ "static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" % (
+ scope.mangle_internal("tp_descr_get")))
+ code.putln(
+ "PyObject *r = 0;")
+ code.putln(
+ "if (!i) i = Py_None;")
+ code.putln(
+ "if (!c) c = Py_None;")
+ #code.put_incref("i", py_object_type)
+ #code.put_incref("c", py_object_type)
+ code.putln(
+ "r = %s(o, i, c);" % (
+ user_get_entry.func_cname))
+ #code.put_decref("i", py_object_type)
+ #code.put_decref("c", py_object_type)
+ code.putln(
+ "return r;")
+ code.putln(
+ "}")
+
+ def generate_descr_set_function(self, scope, code):
+ # Setting and deleting are both done through the tp_descr_set
+ # slot of a descriptor, so we dispatch to the user's __set__
+ # or __delete__, or raise an exception.
+ base_type = scope.parent_type.base_type
+ user_set_entry = scope.lookup_here("__set__")
+ user_del_entry = scope.lookup_here("__delete__")
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, PyObject *i, PyObject *v) {" % (
+ scope.mangle_internal("tp_descr_set")))
+ code.putln(
+ "if (v) {")
+ if user_set_entry:
+ code.putln(
+ "return %s(o, i, v);" % (
+ user_set_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, None, "tp_descr_set", "o, i, v", code)
+ code.putln(
+ 'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "else {")
+ if user_del_entry:
+ code.putln(
+ "return %s(o, i);" % (
+ user_del_entry.func_cname))
+ else:
+ self.generate_guarded_basetype_call(
+ base_type, None, "tp_descr_set", "o, i, v", code)
+ code.putln(
+ 'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "}")
+
+ def generate_property_accessors(self, cclass_scope, code):
+ for entry in cclass_scope.property_entries:
+ property_scope = entry.scope
+ if property_scope.defines_any(["__get__"]):
+ self.generate_property_get_function(entry, code)
+ if property_scope.defines_any(["__set__", "__del__"]):
+ self.generate_property_set_function(entry, code)
+
+ def generate_property_get_function(self, property_entry, code):
+ property_scope = property_entry.scope
+ property_entry.getter_cname = property_scope.parent_scope.mangle(
+ Naming.prop_get_prefix, property_entry.name)
+ get_entry = property_scope.lookup_here("__get__")
+ code.putln("")
+ code.putln(
+ "static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % (
+ property_entry.getter_cname))
+ code.putln(
+ "return %s(o);" % (
+ get_entry.func_cname))
+ code.putln(
+ "}")
+
+ def generate_property_set_function(self, property_entry, code):
+ property_scope = property_entry.scope
+ property_entry.setter_cname = property_scope.parent_scope.mangle(
+ Naming.prop_set_prefix, property_entry.name)
+ set_entry = property_scope.lookup_here("__set__")
+ del_entry = property_scope.lookup_here("__del__")
+ code.putln("")
+ code.putln(
+ "static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" % (
+ property_entry.setter_cname))
+ code.putln(
+ "if (v) {")
+ if set_entry:
+ code.putln(
+ "return %s(o, v);" % (
+ set_entry.func_cname))
+ else:
+ code.putln(
+ 'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "else {")
+ if del_entry:
+ code.putln(
+ "return %s(o);" % (
+ del_entry.func_cname))
+ else:
+ code.putln(
+ 'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
+ code.putln(
+ "return -1;")
+ code.putln(
+ "}")
+ code.putln(
+ "}")
+
+ def generate_typeobj_definition(self, modname, entry, code):
+ type = entry.type
+ scope = type.scope
+ for suite in TypeSlots.substructures:
+ suite.generate_substructure(scope, code)
+ code.putln("")
+ if entry.visibility == 'public':
+ header = "DL_EXPORT(PyTypeObject) %s = {"
+ else:
+ header = "static PyTypeObject %s = {"
+ #code.putln(header % scope.parent_type.typeobj_cname)
+ code.putln(header % type.typeobj_cname)
+ code.putln(
+ "PyVarObject_HEAD_INIT(0, 0)")
+ code.putln(
+ '"%s.%s", /*tp_name*/' % (
+ self.full_module_name, scope.class_name))
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln(
+ "sizeof(%s), /*tp_basicsize*/" % objstruct)
+ code.putln(
+ "0, /*tp_itemsize*/")
+ for slot in TypeSlots.slot_table:
+ slot.generate(scope, code)
+ code.putln(
+ "};")
+
+ def generate_method_table(self, env, code):
+ if env.is_c_class_scope and not env.pyfunc_entries:
+ return
+ binding = env.directives['binding']
+
+ code.putln("")
+ wrapper_code_writer = code.insertion_point()
+
+ code.putln(
+ "static PyMethodDef %s[] = {" % (
+ env.method_table_cname))
+ for entry in env.pyfunc_entries:
+ if not entry.fused_cfunction and not (binding and entry.is_overridable):
+ code.put_pymethoddef(entry, ",", wrapper_code_writer=wrapper_code_writer)
+ code.putln(
+ "{0, 0, 0, 0}")
+ code.putln(
+ "};")
+
+ if wrapper_code_writer.getvalue():
+ wrapper_code_writer.putln("")
+
+ def generate_dict_getter_function(self, scope, code):
+ dict_attr = scope.lookup_here("__dict__")
+ if not dict_attr or not dict_attr.is_variable:
+ return
+ func_name = scope.mangle_internal("__dict__getter")
+ dict_name = dict_attr.cname
+ code.putln("")
+ code.putln("static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" % func_name)
+ self.generate_self_cast(scope, code)
+ code.putln("if (unlikely(!p->%s)){" % dict_name)
+ code.putln("p->%s = PyDict_New();" % dict_name)
+ code.putln("}")
+ code.putln("Py_XINCREF(p->%s);" % dict_name)
+ code.putln("return p->%s;" % dict_name)
+ code.putln("}")
+
+ def generate_getset_table(self, env, code):
+ if env.property_entries:
+ code.putln("")
+ code.putln(
+ "static struct PyGetSetDef %s[] = {" %
+ env.getset_table_cname)
+ for entry in env.property_entries:
+ doc = entry.doc
+ if doc:
+ if doc.is_unicode:
+ doc = doc.as_utf8_string()
+ doc_code = doc.as_c_string_literal()
+ else:
+ doc_code = "0"
+ code.putln(
+ '{(char *)"%s", %s, %s, (char *)%s, 0},' % (
+ entry.name,
+ entry.getter_cname or "0",
+ entry.setter_cname or "0",
+ doc_code))
+ code.putln(
+ "{0, 0, 0, 0, 0}")
+ code.putln(
+ "};")
+
+ def create_import_star_conversion_utility_code(self, env):
+ # Create all conversion helpers that are needed for "import *" assignments.
+ # Must be done before code generation to support CythonUtilityCode.
+ for name, entry in sorted(env.entries.items()):
+ if entry.is_cglobal and entry.used:
+ if not entry.type.is_pyobject:
+ entry.type.create_from_py_utility_code(env)
+
+ def generate_import_star(self, env, code):
+ env.use_utility_code(UtilityCode.load_cached("CStringEquals", "StringTools.c"))
+ code.putln()
+ code.enter_cfunc_scope() # as we need labels
+ code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set)
+
+ code.putln("static const char* internal_type_names[] = {")
+ for name, entry in sorted(env.entries.items()):
+ if entry.is_type:
+ code.putln('"%s",' % name)
+ code.putln("0")
+ code.putln("};")
+
+ code.putln("const char** type_name = internal_type_names;")
+ code.putln("while (*type_name) {")
+ code.putln("if (__Pyx_StrEq(name, *type_name)) {")
+ code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);')
+ code.putln('goto bad;')
+ code.putln("}")
+ code.putln("type_name++;")
+ code.putln("}")
+
+ old_error_label = code.new_error_label()
+ code.putln("if (0);") # so the first one can be "else if"
+ msvc_count = 0
+ for name, entry in sorted(env.entries.items()):
+ if entry.is_cglobal and entry.used and not entry.type.is_const:
+ msvc_count += 1
+ if msvc_count % 100 == 0:
+ code.putln("#ifdef _MSC_VER")
+ code.putln("if (0); /* Workaround for MSVC C1061. */")
+ code.putln("#endif")
+ code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name)
+ if entry.type.is_pyobject:
+ if entry.type.is_extension_type or entry.type.is_builtin_type:
+ code.putln("if (!(%s)) %s;" % (
+ entry.type.type_test_code("o"),
+ code.error_goto(entry.pos)))
+ code.putln("Py_INCREF(o);")
+ code.put_decref(entry.cname, entry.type, nanny=False)
+ code.putln("%s = %s;" % (
+ entry.cname,
+ PyrexTypes.typecast(entry.type, py_object_type, "o")))
+ elif entry.type.create_from_py_utility_code(env):
+ # if available, utility code was already created in self.prepare_utility_code()
+ code.putln(entry.type.from_py_call_code(
+ 'o', entry.cname, entry.pos, code))
+ else:
+ code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % (
+ name, entry.type))
+ code.putln(code.error_goto(entry.pos))
+ code.putln("}")
+ code.putln("else {")
+ code.putln("if (PyObject_SetAttr(%s, py_name, o) < 0) goto bad;" % Naming.module_cname)
+ code.putln("}")
+ code.putln("return 0;")
+ if code.label_used(code.error_label):
+ code.put_label(code.error_label)
+ # This helps locate the offending name.
+ code.put_add_traceback(self.full_module_name)
+ code.error_label = old_error_label
+ code.putln("bad:")
+ code.putln("return -1;")
+ code.putln("}")
+ code.putln("")
+ code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
+ code.exit_cfunc_scope() # done with labels
+
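+ # Semantically, the generated import-star helper behaves like this
+ # Python sketch (illustrative only; the helper names are made up):
+ #
+ #     def import_star_set(module, name, obj):
+ #         if name in internal_type_names:
+ #             raise TypeError("Cannot overwrite C type %s" % name)
+ #         if name in c_globals:
+ #             c_globals[name] = convert(obj)   # type-checked or converted
+ #         else:
+ #             setattr(module, name, obj)
+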
+ def generate_module_init_func(self, imported_modules, env, options, code):
+ subfunction = self.mod_init_subfunction(self.pos, self.scope, code)
+
+ code.enter_cfunc_scope(self.scope)
+ code.putln("")
+ code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
+ init_name = 'init' + (options.init_suffix or env.module_name)
+ header2 = "__Pyx_PyMODINIT_FUNC %s(void)" % init_name
+ header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env, options)
+ code.putln("#if PY_MAJOR_VERSION < 3")
+ # Optimise for small code size as the module init function is only executed once.
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
+ code.putln(header2)
+ code.putln("#else")
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
+ code.putln(header3)
+
+ # CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("{")
+ code.putln("return PyModuleDef_Init(&%s);" % Naming.pymoduledef_cname)
+ code.putln("}")
+
+ mod_create_func = UtilityCode.load_as_string("ModuleCreationPEP489", "ModuleSetupCode.c")[1]
+ code.put(mod_create_func)
+
+ code.putln("")
+ # main module init code lives in Py_mod_exec function, not in PyInit function
+ code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % (
+ self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env),
+ Naming.pymodinit_module_arg))
+ code.putln("#endif") # PEP489
+
+ code.putln("#endif") # Py3
+
+ # start of module init/exec function (pre/post PEP 489)
+ code.putln("{")
+
+ tempdecl_code = code.insertion_point()
+
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c"))
+
+ code.put_declare_refcount_context()
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ # Most extension modules simply can't deal with it, and Cython isn't ready either.
+ # See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support
+ code.putln("if (%s) {" % Naming.module_cname)
+ # Hack: enforce single initialisation.
+ code.putln("if (%s == %s) return 0;" % (
+ Naming.module_cname,
+ Naming.pymodinit_module_arg,
+ ))
+ code.putln('PyErr_SetString(PyExc_RuntimeError,'
+ ' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' %
+ env.module_name)
+ code.putln("return -1;")
+ code.putln("}")
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
+ code.putln("if (%s) return __Pyx_NewRef(%s);" % (
+ Naming.module_cname,
+ Naming.module_cname,
+ ))
+ code.putln("#endif")
+
+ if profile or linetrace:
+ tempdecl_code.put_trace_declarations()
+ code.put_trace_frame_init()
+
+ refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1]
+ code.putln(refnanny_import_code.rstrip())
+ code.put_setup_refcount_context(header3)
+
+ env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
+ code.put_error_if_neg(self.pos, "__Pyx_check_binary_version()")
+
+ code.putln("#ifdef __Pxy_PyFrame_Initialize_Offsets")
+ code.putln("__Pxy_PyFrame_Initialize_Offsets();")
+ code.putln("#endif")
+ code.putln("%s = PyTuple_New(0); %s" % (
+ Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos)))
+ code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % (
+ Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos)))
+ code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % (
+ Naming.empty_unicode, code.error_goto_if_null(Naming.empty_unicode, self.pos)))
+
+ for ext_type in ('CyFunction', 'FusedFunction', 'Coroutine', 'Generator', 'AsyncGen', 'StopAsyncIteration'):
+ code.putln("#ifdef __Pyx_%s_USED" % ext_type)
+ code.put_error_if_neg(self.pos, "__pyx_%s_init()" % ext_type)
+ code.putln("#endif")
+
+ code.putln("/*--- Library function declarations ---*/")
+ if env.directives['np_pythran']:
+ code.put_error_if_neg(self.pos, "_import_array()")
+
+ code.putln("/*--- Threads initialization code ---*/")
+ code.putln("#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 "
+ "&& defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS")
+ code.putln("PyEval_InitThreads();")
+ code.putln("#endif")
+
+ code.putln("/*--- Module creation code ---*/")
+ self.generate_module_creation_code(env, options, code)
+
+ code.putln("/*--- Initialize various global constants etc. ---*/")
+ code.put_error_if_neg(self.pos, "__Pyx_InitGlobals()")
+
+ code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || "
+ "__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)")
+ code.put_error_if_neg(self.pos, "__Pyx_init_sys_getdefaultencoding_params()")
+ code.putln("#endif")
+
+ code.putln("if (%s%s) {" % (Naming.module_is_main, self.full_module_name.replace('.', '__')))
+ code.put_error_if_neg(self.pos, 'PyObject_SetAttr(%s, %s, %s)' % (
+ env.module_cname,
+ code.intern_identifier(EncodedString("__name__")),
+ code.intern_identifier(EncodedString("__main__"))))
+ code.putln("}")
+
+ # set up __file__ and __path__, then add the module to sys.modules
+ self.generate_module_import_setup(env, code)
+
+ if Options.cache_builtins:
+ code.putln("/*--- Builtin init code ---*/")
+ code.put_error_if_neg(self.pos, "__Pyx_InitCachedBuiltins()")
+
+ code.putln("/*--- Constants init code ---*/")
+ code.put_error_if_neg(self.pos, "__Pyx_InitCachedConstants()")
+
+ code.putln("/*--- Global type/function init code ---*/")
+
+ with subfunction("Global init code") as inner_code:
+ self.generate_global_init_code(env, inner_code)
+
+ with subfunction("Variable export code") as inner_code:
+ self.generate_c_variable_export_code(env, inner_code)
+
+ with subfunction("Function export code") as inner_code:
+ self.generate_c_function_export_code(env, inner_code)
+
+ with subfunction("Type init code") as inner_code:
+ self.generate_type_init_code(env, inner_code)
+
+ with subfunction("Type import code") as inner_code:
+ for module in imported_modules:
+ self.generate_type_import_code_for_module(module, env, inner_code)
+
+ with subfunction("Variable import code") as inner_code:
+ for module in imported_modules:
+ self.generate_c_variable_import_code_for_module(module, env, inner_code)
+
+ with subfunction("Function import code") as inner_code:
+ for module in imported_modules:
+ self.specialize_fused_types(module)
+ self.generate_c_function_import_code_for_module(module, env, inner_code)
+
+ code.putln("/*--- Execution code ---*/")
+ code.mark_pos(None)
+
+ code.putln("#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)")
+ code.put_error_if_neg(self.pos, "__Pyx_patch_abc()")
+ code.putln("#endif")
+
+ if profile or linetrace:
+ code.put_trace_call(header3, self.pos, nogil=not code.funcstate.gil_owned)
+ code.funcstate.can_trace = True
+
+ self.body.generate_execution_code(code)
+
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+ code.put_trace_return("Py_None", nogil=not code.funcstate.gil_owned)
+
+ code.putln()
+ code.putln("/*--- Wrapped vars code ---*/")
+ self.generate_wrapped_entries_code(env, code)
+ code.putln()
+
+ if Options.generate_cleanup_code:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RegisterModuleCleanup", "ModuleSetupCode.c"))
+ code.putln("if (__Pyx_RegisterCleanup()) %s" % code.error_goto(self.pos))
+
+ code.put_goto(code.return_label)
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.putln('if (%s) {' % env.module_cname)
+ code.putln('if (%s) {' % env.module_dict_cname)
+ code.put_add_traceback("init %s" % env.qualified_name)
+ code.globalstate.use_utility_code(Nodes.traceback_utility_code)
+ # Module reference and module dict are in global variables which might still be needed
+ # for cleanup, atexit code, etc., so leaking is better than crashing.
+ # At least clearing the module dict here might be a good idea, but could still break
+ # user code in atexit or other global registries.
+ ##code.put_decref_clear(env.module_dict_cname, py_object_type, nanny=False)
+ code.putln('}')
+ code.put_decref_clear(env.module_cname, py_object_type, nanny=False, clear_before_decref=True)
+ code.putln('} else if (!PyErr_Occurred()) {')
+ code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' % env.qualified_name)
+ code.putln('}')
+ code.put_label(code.return_label)
+
+ code.put_finish_refcount_context()
+
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("return (%s != NULL) ? 0 : -1;" % env.module_cname)
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ code.putln("return %s;" % env.module_cname)
+ code.putln("#else")
+ code.putln("return;")
+ code.putln("#endif")
+ code.putln('}')
+
+ tempdecl_code.put_temp_declarations(code.funcstate)
+
+ code.exit_cfunc_scope()
+
+ def mod_init_subfunction(self, pos, scope, orig_code):
+ """
+ Return a context manager that allows diverting the module init code generation
+ into a separate function and inserting a call to it in its place.
+
+ Can be reused sequentially to create multiple functions.
+ The functions get inserted at the point where the context manager was created.
+ The call gets inserted where the context manager is used (on entry).
+ """
+ prototypes = orig_code.insertion_point()
+ prototypes.putln("")
+ function_code = orig_code.insertion_point()
+ function_code.putln("")
+
+ class ModInitSubfunction(object):
+ def __init__(self, code_type):
+ cname = '_'.join(code_type.lower().split())
+ assert re.match("^[a-z0-9_]+$", cname)
+ self.cfunc_name = "__Pyx_modinit_%s" % cname
+ self.description = code_type
+ self.tempdecl_code = None
+ self.call_code = None
+
+ def __enter__(self):
+ self.call_code = orig_code.insertion_point()
+ code = function_code
+ code.enter_cfunc_scope(scope)
+ prototypes.putln("static CYTHON_SMALL_CODE int %s(void); /*proto*/" % self.cfunc_name)
+ code.putln("static int %s(void) {" % self.cfunc_name)
+ code.put_declare_refcount_context()
+ self.tempdecl_code = code.insertion_point()
+ code.put_setup_refcount_context(self.cfunc_name)
+ # Leave a grepable marker that makes it easy to find the generator source.
+ code.putln("/*--- %s ---*/" % self.description)
+ return code
+
+ def __exit__(self, *args):
+ code = function_code
+ code.put_finish_refcount_context()
+ code.putln("return 0;")
+
+ self.tempdecl_code.put_temp_declarations(code.funcstate)
+ self.tempdecl_code = None
+
+ needs_error_handling = code.label_used(code.error_label)
+ if needs_error_handling:
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.put_finish_refcount_context()
+ code.putln("return -1;")
+ code.putln("}")
+ code.exit_cfunc_scope()
+ code.putln("")
+
+ if needs_error_handling:
+ self.call_code.putln(
+ self.call_code.error_goto_if_neg("%s()" % self.cfunc_name, pos))
+ else:
+ self.call_code.putln("(void)%s();" % self.cfunc_name)
+ self.call_code = None
+
+ return ModInitSubfunction
+
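+ # Usage, as seen in generate_module_init_func above:
+ #
+ #     subfunction = self.mod_init_subfunction(self.pos, self.scope, code)
+ #     with subfunction("Type init code") as inner_code:
+ #         self.generate_type_init_code(env, inner_code)
+ #
+ # Each `with` block lands in its own static helper; only the call is
+ # emitted at the point of use, which keeps the module init function small
+ # enough for compilers that struggle with very large function bodies.
+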
+ def generate_module_import_setup(self, env, code):
+ module_path = env.directives['set_initial_path']
+ if module_path == 'SOURCEFILE':
+ module_path = self.pos[0].filename
+
+ if module_path:
+ code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
+ code.putln('if (PyObject_SetAttrString(%s, "__file__", %s) < 0) %s;' % (
+ env.module_cname,
+ code.globalstate.get_py_string_const(
+ EncodedString(decode_filename(module_path))).cname,
+ code.error_goto(self.pos)))
+ code.putln("}")
+
+ if env.is_package:
+ # set __path__ to mark the module as package
+ code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
+ temp = code.funcstate.allocate_temp(py_object_type, True)
+ code.putln('%s = Py_BuildValue("[O]", %s); %s' % (
+ temp,
+ code.globalstate.get_py_string_const(
+ EncodedString(decode_filename(
+ os.path.dirname(module_path)))).cname,
+ code.error_goto_if_null(temp, self.pos)))
+ code.put_gotref(temp)
+ code.putln(
+ 'if (PyObject_SetAttrString(%s, "__path__", %s) < 0) %s;' % (
+ env.module_cname, temp, code.error_goto(self.pos)))
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
+ code.putln("}")
+
+ elif env.is_package:
+ # packages require __path__, so all we can do is try to figure
+ # out the module path at runtime by rerunning the import lookup
+ code.putln("if (!CYTHON_PEP489_MULTI_PHASE_INIT) {")
+ code.globalstate.use_utility_code(UtilityCode.load(
+ "SetPackagePathFromImportLib", "ImportExport.c"))
+ code.putln(code.error_goto_if_neg(
+ '__Pyx_SetPackagePathFromImportLib(%s)' % (
+ code.globalstate.get_py_string_const(
+ EncodedString(self.full_module_name)).cname),
+ self.pos))
+ code.putln("}")
+
+ # CPython may not have put us into sys.modules yet, but relative imports and reimports require it
+ fq_module_name = self.full_module_name
+ if fq_module_name.endswith('.__init__'):
+ fq_module_name = fq_module_name[:-len('.__init__')]
+ code.putln("#if PY_MAJOR_VERSION >= 3")
+ code.putln("{")
+ code.putln("PyObject *modules = PyImport_GetModuleDict(); %s" %
+ code.error_goto_if_null("modules", self.pos))
+ code.putln('if (!PyDict_GetItemString(modules, "%s")) {' % fq_module_name)
+ code.putln(code.error_goto_if_neg('PyDict_SetItemString(modules, "%s", %s)' % (
+ fq_module_name, env.module_cname), self.pos))
+ code.putln("}")
+ code.putln("}")
+ code.putln("#endif")
+
+ def generate_module_cleanup_func(self, env, code):
+ if not Options.generate_cleanup_code:
+ return
+
+ code.putln('static void %s(CYTHON_UNUSED PyObject *self) {' %
+ Naming.cleanup_cname)
+ code.enter_cfunc_scope(env)
+
+ if Options.generate_cleanup_code >= 2:
+ code.putln("/*--- Global cleanup code ---*/")
+ rev_entries = list(env.var_entries)
+ rev_entries.reverse()
+ for entry in rev_entries:
+ if entry.visibility != 'extern':
+ if entry.type.is_pyobject and entry.used:
+ code.put_xdecref_clear(
+ entry.cname, entry.type,
+ clear_before_decref=True,
+ nanny=False)
+ code.putln("__Pyx_CleanupGlobals();")
+ if Options.generate_cleanup_code >= 3:
+ code.putln("/*--- Type import cleanup code ---*/")
+ for ext_type in sorted(env.types_imported, key=operator.attrgetter('typeptr_cname')):
+ code.put_xdecref_clear(
+ ext_type.typeptr_cname, ext_type,
+ clear_before_decref=True,
+ nanny=False)
+ if Options.cache_builtins:
+ code.putln("/*--- Builtin cleanup code ---*/")
+ for entry in env.cached_builtins:
+ code.put_xdecref_clear(
+ entry.cname, PyrexTypes.py_object_type,
+ clear_before_decref=True,
+ nanny=False)
+ code.putln("/*--- Intern cleanup code ---*/")
+ code.put_decref_clear(Naming.empty_tuple,
+ PyrexTypes.py_object_type,
+ clear_before_decref=True,
+ nanny=False)
+ for entry in env.c_class_entries:
+ cclass_type = entry.type
+ if cclass_type.is_external or cclass_type.base_type:
+ continue
+ if cclass_type.scope.directives.get('freelist', 0):
+ scope = cclass_type.scope
+ freelist_name = scope.mangle_internal(Naming.freelist_name)
+ freecount_name = scope.mangle_internal(Naming.freecount_name)
+ code.putln("while (%s > 0) {" % freecount_name)
+ code.putln("PyObject* o = (PyObject*)%s[--%s];" % (
+ freelist_name, freecount_name))
+ code.putln("(*Py_TYPE(o)->tp_free)(o);")
+ code.putln("}")
+# for entry in env.pynum_entries:
+# code.put_decref_clear(entry.cname,
+# PyrexTypes.py_object_type,
+# nanny=False)
+# for entry in env.all_pystring_entries:
+# if entry.is_interned:
+# code.put_decref_clear(entry.pystring_cname,
+# PyrexTypes.py_object_type,
+# nanny=False)
+# for entry in env.default_entries:
+# if entry.type.is_pyobject and entry.used:
+# code.putln("Py_DECREF(%s); %s = 0;" % (
+# code.entry_as_pyobject(entry), entry.cname))
+ if Options.pre_import is not None:
+ code.put_decref_clear(Naming.preimport_cname, py_object_type,
+ nanny=False, clear_before_decref=True)
+ for cname in [env.module_dict_cname, Naming.cython_runtime_cname, Naming.builtins_cname]:
+ code.put_decref_clear(cname, py_object_type, nanny=False, clear_before_decref=True)
+
+ def generate_main_method(self, env, code):
+ module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
+ if Options.embed == "main":
+ wmain = "wmain"
+ else:
+ wmain = Options.embed
+ main_method = UtilityCode.load_cached("MainFunction", "Embed.c")
+ code.globalstate.use_utility_code(
+ main_method.specialize(
+ module_name=env.module_name,
+ module_is_main=module_is_main,
+ main_method=Options.embed,
+ wmain_method=wmain))
+
+ def mod_init_func_cname(self, prefix, env, options=None):
+ return '%s_%s' % (prefix, options and options.init_suffix or env.module_name)
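+
+ # Illustrative result (module name hypothetical): for prefix
+ # Naming.pymodule_exec_func_cname and a module "spam", this returns
+ # "__pyx_pymod_exec_spam"; if options carry an init_suffix, that
+ # suffix is used in place of the module name.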
+
+ def generate_pymoduledef_struct(self, env, options, code):
+ if env.doc:
+ doc = "%s" % code.get_string_const(env.doc)
+ else:
+ doc = "0"
+ if Options.generate_cleanup_code:
+ cleanup_func = "(freefunc)%s" % Naming.cleanup_cname
+ else:
+ cleanup_func = 'NULL'
+
+ code.putln("")
+ code.putln("#if PY_MAJOR_VERSION >= 3")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ exec_func_cname = self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env)
+ code.putln("static PyObject* %s(PyObject *spec, PyModuleDef *def); /*proto*/" %
+ Naming.pymodule_create_func_cname)
+ code.putln("static int %s(PyObject* module); /*proto*/" % exec_func_cname)
+
+ code.putln("static PyModuleDef_Slot %s[] = {" % Naming.pymoduledef_slots_cname)
+ code.putln("{Py_mod_create, (void*)%s}," % Naming.pymodule_create_func_cname)
+ code.putln("{Py_mod_exec, (void*)%s}," % exec_func_cname)
+ code.putln("{0, NULL}")
+ code.putln("};")
+ code.putln("#endif")
+
+ code.putln("")
+ code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname)
+ code.putln(" PyModuleDef_HEAD_INIT,")
+ code.putln(' "%s",' % (options.module_name or env.module_name))
+ code.putln(" %s, /* m_doc */" % doc)
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln(" 0, /* m_size */")
+ code.putln("#else")
+ code.putln(" -1, /* m_size */")
+ code.putln("#endif")
+ code.putln(" %s /* m_methods */," % env.method_table_cname)
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln(" %s, /* m_slots */" % Naming.pymoduledef_slots_cname)
+ code.putln("#else")
+ code.putln(" NULL, /* m_reload */")
+ code.putln("#endif")
+ code.putln(" NULL, /* m_traverse */")
+ code.putln(" NULL, /* m_clear */")
+ code.putln(" %s /* m_free */" % cleanup_func)
+ code.putln("};")
+ code.putln("#endif")
+
+ def generate_module_creation_code(self, env, options, code):
+ # Generate code to create the module object and
+ # install the builtins.
+ if env.doc:
+ doc = "%s" % code.get_string_const(env.doc)
+ else:
+ doc = "0"
+
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("%s = %s;" % (
+ env.module_cname,
+ Naming.pymodinit_module_arg))
+ code.put_incref(env.module_cname, py_object_type, nanny=False)
+ code.putln("#else")
+ code.putln("#if PY_MAJOR_VERSION < 3")
+ code.putln(
+ '%s = Py_InitModule4("%s", %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % (
+ env.module_cname,
+ options.module_name or env.module_name,
+ env.method_table_cname,
+ doc,
+ env.module_cname))
+ code.putln("#else")
+ code.putln(
+ "%s = PyModule_Create(&%s);" % (
+ env.module_cname,
+ Naming.pymoduledef_cname))
+ code.putln("#endif")
+ code.putln(code.error_goto_if_null(env.module_cname, self.pos))
+ code.putln("#endif") # CYTHON_PEP489_MULTI_PHASE_INIT
+
+ code.putln(
+ "%s = PyModule_GetDict(%s); %s" % (
+ env.module_dict_cname, env.module_cname,
+ code.error_goto_if_null(env.module_dict_cname, self.pos)))
+ code.put_incref(env.module_dict_cname, py_object_type, nanny=False)
+
+ code.putln(
+ '%s = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); %s' % (
+ Naming.builtins_cname,
+ code.error_goto_if_null(Naming.builtins_cname, self.pos)))
+ code.put_incref(Naming.builtins_cname, py_object_type, nanny=False)
+ code.putln(
+ '%s = PyImport_AddModule((char *) "cython_runtime"); %s' % (
+ Naming.cython_runtime_cname,
+ code.error_goto_if_null(Naming.cython_runtime_cname, self.pos)))
+ code.put_incref(Naming.cython_runtime_cname, py_object_type, nanny=False)
+ code.putln(
+ 'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s' % (
+ env.module_cname,
+ Naming.builtins_cname,
+ code.error_goto(self.pos)))
+ if Options.pre_import is not None:
+ code.putln(
+ '%s = PyImport_AddModule("%s"); %s' % (
+ Naming.preimport_cname,
+ Options.pre_import,
+ code.error_goto_if_null(Naming.preimport_cname, self.pos)))
+ code.put_incref(Naming.preimport_cname, py_object_type, nanny=False)
+
+ def generate_global_init_code(self, env, code):
+ # Generate code to initialise global PyObject *
+ # variables to None.
+ for entry in env.var_entries:
+ if entry.visibility != 'extern':
+ if entry.used:
+ entry.type.global_init_code(entry, code)
+
+ def generate_wrapped_entries_code(self, env, code):
+ for name, entry in sorted(env.entries.items()):
+ if (entry.create_wrapper
+ and not entry.is_type
+ and entry.scope is env):
+ if not entry.type.create_to_py_utility_code(env):
+ error(entry.pos, "Cannot convert '%s' to Python object" % entry.type)
+ code.putln("{")
+ code.putln("PyObject* wrapped = %s(%s);" % (
+ entry.type.to_py_function,
+ entry.cname))
+ code.putln(code.error_goto_if_null("wrapped", entry.pos))
+ code.putln(
+ 'if (PyObject_SetAttrString(%s, "%s", wrapped) < 0) %s;' % (
+ env.module_cname,
+ name,
+ code.error_goto(entry.pos)))
+ code.putln("}")
+
+ def generate_c_variable_export_code(self, env, code):
+ # Generate code to export the addresses of module-level C variables as void pointers.
+ entries = []
+ for entry in env.var_entries:
+ if (entry.api
+ or entry.defined_in_pxd
+ or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
+ entries.append(entry)
+ if entries:
+ env.use_utility_code(UtilityCode.load_cached("VoidPtrExport", "ImportExport.c"))
+ for entry in entries:
+ signature = entry.type.empty_declaration_code()
+ name = code.intern_identifier(entry.name)
+ code.putln('if (__Pyx_ExportVoidPtr(%s, (void *)&%s, "%s") < 0) %s' % (
+ name, entry.cname, signature,
+ code.error_goto(self.pos)))
+
+ def generate_c_function_export_code(self, env, code):
+ # Generate code to create PyCFunction wrappers for exported C functions.
+ entries = []
+ for entry in env.cfunc_entries:
+ if (entry.api
+ or entry.defined_in_pxd
+ or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
+ entries.append(entry)
+ if entries:
+ env.use_utility_code(
+ UtilityCode.load_cached("FunctionExport", "ImportExport.c"))
+ # Note: while this looks like it could be more cheaply stored and read from a struct array,
+ # investigation shows that the resulting binary is smaller with repeated function calls.
+ for entry in entries:
+ signature = entry.type.signature_string()
+ code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % (
+ entry.name,
+ entry.cname,
+ signature,
+ code.error_goto(self.pos)))
+
+ def generate_type_import_code_for_module(self, module, env, code):
+ # Generate type import code for all exported extension types in
+ # an imported module.
+ #if module.c_class_entries:
+ with ModuleImportGenerator(code) as import_generator:
+ for entry in module.c_class_entries:
+ if entry.defined_in_pxd:
+ self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator)
+
+ def specialize_fused_types(self, pxd_env):
+ """
+ If fused c(p)def functions are defined in an imported pxd, but not
+ used in this implementation file, we still have fused entries rather
+ than specialized ones. This method replaces any fused entries with their
+ specialized ones.
+ """
+ for entry in pxd_env.cfunc_entries[:]:
+ if entry.type.is_fused:
+ # This call modifies the cfunc_entries in-place
+ entry.type.get_all_specialized_function_types()
+
+ def generate_c_variable_import_code_for_module(self, module, env, code):
+ # Generate import code for all exported C variables in a cimported module.
+ entries = []
+ for entry in module.var_entries:
+ if entry.defined_in_pxd:
+ entries.append(entry)
+ if entries:
+ env.use_utility_code(
+ UtilityCode.load_cached("VoidPtrImport", "ImportExport.c"))
+ temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln(
+ '%s = PyImport_ImportModule("%s"); if (!%s) %s' % (
+ temp,
+ module.qualified_name,
+ temp,
+ code.error_goto(self.pos)))
+ code.put_gotref(temp)
+ for entry in entries:
+ if env is module:
+ cname = entry.cname
+ else:
+ cname = module.mangle(Naming.varptr_prefix, entry.name)
+ signature = entry.type.empty_declaration_code()
+ code.putln(
+ 'if (__Pyx_ImportVoidPtr(%s, "%s", (void **)&%s, "%s") < 0) %s' % (
+ temp, entry.name, cname, signature,
+ code.error_goto(self.pos)))
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
+
+ def generate_c_function_import_code_for_module(self, module, env, code):
+ # Generate import code for all exported C functions in a cimported module.
+ entries = []
+ for entry in module.cfunc_entries:
+ if entry.defined_in_pxd and entry.used:
+ entries.append(entry)
+ if entries:
+ env.use_utility_code(
+ UtilityCode.load_cached("FunctionImport", "ImportExport.c"))
+ temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln(
+ '%s = PyImport_ImportModule("%s"); if (!%s) %s' % (
+ temp,
+ module.qualified_name,
+ temp,
+ code.error_goto(self.pos)))
+ code.put_gotref(temp)
+ for entry in entries:
+ code.putln(
+ 'if (__Pyx_ImportFunction(%s, "%s", (void (**)(void))&%s, "%s") < 0) %s' % (
+ temp,
+ entry.name,
+ entry.cname,
+ entry.type.signature_string(),
+ code.error_goto(self.pos)))
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
+
+ def generate_type_init_code(self, env, code):
+ # Generate type import code for extern extension types
+ # and type ready code for non-extern ones.
+ with ModuleImportGenerator(code) as import_generator:
+ for entry in env.c_class_entries:
+ if entry.visibility == 'extern' and not entry.utility_code_definition:
+ self.generate_type_import_code(env, entry.type, entry.pos, code, import_generator)
+ else:
+ self.generate_base_type_import_code(env, entry, code, import_generator)
+ self.generate_exttype_vtable_init_code(entry, code)
+ if entry.type.early_init:
+ self.generate_type_ready_code(entry, code)
+
+ def generate_base_type_import_code(self, env, entry, code, import_generator):
+ base_type = entry.type.base_type
+ if (base_type and base_type.module_name != env.qualified_name and not
+ base_type.is_builtin_type and not entry.utility_code_definition):
+ self.generate_type_import_code(env, base_type, self.pos, code, import_generator)
+
+ def generate_type_import_code(self, env, type, pos, code, import_generator):
+ # If not already done, generate code to import the typeobject of an
+ # extension type defined in another module, and extract its C method
+ # table pointer if any.
+ if type in env.types_imported:
+ return
+ if type.name not in Code.ctypedef_builtins_map:
+ # see corresponding condition in generate_type_import_call() below!
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("TypeImport", "ImportExport.c"))
+ self.generate_type_import_call(type, code, import_generator, error_pos=pos)
+ if type.vtabptr_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('GetVTable', 'ImportExport.c'))
+ code.putln("%s = (struct %s*)__Pyx_GetVtable(%s->tp_dict); %s" % (
+ type.vtabptr_cname,
+ type.vtabstruct_cname,
+ type.typeptr_cname,
+ code.error_goto_if_null(type.vtabptr_cname, pos)))
+ env.types_imported.add(type)
+
+ def generate_type_import_call(self, type, code, import_generator, error_code=None, error_pos=None):
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ sizeof_objstruct = objstruct
+ module_name = type.module_name
+ condition = replacement = None
+ if module_name not in ('__builtin__', 'builtins'):
+ module_name = '"%s"' % module_name
+ elif type.name in Code.ctypedef_builtins_map:
+ # Fast path for special builtins, don't actually import
+ ctypename = Code.ctypedef_builtins_map[type.name]
+ code.putln('%s = %s;' % (type.typeptr_cname, ctypename))
+ return
+ else:
+ module_name = '__Pyx_BUILTIN_MODULE_NAME'
+ if type.name in Code.non_portable_builtins_map:
+ condition, replacement = Code.non_portable_builtins_map[type.name]
+ if objstruct in Code.basicsize_builtins_map:
+ # Some builtin types have a tp_basicsize which differs from sizeof(...):
+ sizeof_objstruct = Code.basicsize_builtins_map[objstruct]
+
+ if not error_code:
+ assert error_pos is not None
+ error_code = code.error_goto(error_pos)
+
+ module = import_generator.imported_module(module_name, error_code)
+ code.put('%s = __Pyx_ImportType(%s, %s,' % (
+ type.typeptr_cname,
+ module,
+ module_name))
+
+ if condition and replacement:
+ code.putln("") # start in new line
+ code.putln("#if %s" % condition)
+ code.putln('"%s",' % replacement)
+ code.putln("#else")
+ code.putln('"%s",' % type.name)
+ code.putln("#endif")
+ else:
+ code.put(' "%s", ' % type.name)
+
+ if sizeof_objstruct != objstruct:
+ if not condition:
+ code.putln("") # start in new line
+ code.putln("#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000")
+ code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT(%s),' % (objstruct, objstruct))
+ code.putln("#else")
+ code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT(%s),' % (sizeof_objstruct, sizeof_objstruct))
+ code.putln("#endif")
+ else:
+ code.putln('sizeof(%s), __PYX_GET_STRUCT_ALIGNMENT(%s),' % (objstruct, objstruct))
+
+ # check_size
+ if type.check_size and type.check_size in ('error', 'warn', 'ignore'):
+ check_size = type.check_size
+ elif not type.is_external or type.is_subclassed:
+ check_size = 'error'
+ else:
+ raise RuntimeError("invalid value for check_size '%s' when compiling %s.%s" % (
+ type.check_size, module_name, type.name))
+ code.putln('__Pyx_ImportType_CheckSize_%s);' % check_size.title())
+
+ code.putln(' if (!%s) %s' % (type.typeptr_cname, error_code))
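+
+ # Illustrative assembled output for a non-builtin external type (all names
+ # hypothetical, error goto elided):
+ # __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy",
+ # "ndarray", sizeof(PyArrayObject), __PYX_GET_STRUCT_ALIGNMENT(PyArrayObject),
+ # __Pyx_ImportType_CheckSize_Ignore);
+ # if (!__pyx_ptype_5numpy_ndarray) <error goto>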
+
+ def generate_type_ready_code(self, entry, code):
+ Nodes.CClassDefNode.generate_type_ready_code(entry, code)
+
+ def generate_exttype_vtable_init_code(self, entry, code):
+ # Generate code to initialise the C method table of an
+ # extension type.
+ type = entry.type
+ if type.vtable_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.vtabptr_cname,
+ type.vtable_cname))
+ if type.base_type and type.base_type.vtabptr_cname:
+ code.putln(
+ "%s.%s = *%s;" % (
+ type.vtable_cname,
+ Naming.obj_base_cname,
+ type.base_type.vtabptr_cname))
+
+ c_method_entries = [
+ entry for entry in type.scope.cfunc_entries
+ if entry.func_cname]
+ if c_method_entries:
+ for meth_entry in c_method_entries:
+ cast = meth_entry.type.signature_cast_string()
+ code.putln(
+ "%s.%s = %s%s;" % (
+ type.vtable_cname,
+ meth_entry.cname,
+ cast,
+ meth_entry.func_cname))
+
+
+class ModuleImportGenerator(object):
+ """
+ Helper to generate module imports while importing external types.
+ This is used to avoid excessive re-imports of external modules when multiple types are looked up.
+ """
+ def __init__(self, code, imported_modules=None):
+ self.code = code
+ self.imported = {}
+ if imported_modules:
+ for name, cname in imported_modules.items():
+ self.imported['"%s"' % name] = cname
+ self.temps = [] # remember original import order for freeing
+
+ def imported_module(self, module_name_string, error_code):
+ if module_name_string in self.imported:
+ return self.imported[module_name_string]
+
+ code = self.code
+ temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ self.temps.append(temp)
+ code.putln('%s = PyImport_ImportModule(%s); if (unlikely(!%s)) %s' % (
+ temp, module_name_string, temp, error_code))
+ code.put_gotref(temp)
+ self.imported[module_name_string] = temp
+ return temp
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc):
+ code = self.code
+ for temp in self.temps:
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
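+
+ # Minimal usage sketch, mirroring generate_type_init_code above ("somemod"
+ # is hypothetical; note that keys are C string literals, quotes included):
+ # with ModuleImportGenerator(code) as import_generator:
+ # module_cname = import_generator.imported_module('"somemod"', error_code)
+ # All modules imported this way are decref'd and released on __exit__.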
+
+
+def generate_cfunction_declaration(entry, env, code, definition):
+ from_cy_utility = entry.used and entry.utility_code_definition
+ if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and (
+ definition or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)):
+ if entry.visibility == 'extern':
+ storage_class = Naming.extern_c_macro
+ dll_linkage = "DL_IMPORT"
+ elif entry.visibility == 'public':
+ storage_class = Naming.extern_c_macro
+ dll_linkage = None
+ elif entry.visibility == 'private':
+ storage_class = "static"
+ dll_linkage = None
+ else:
+ storage_class = "static"
+ dll_linkage = None
+ type = entry.type
+
+ if entry.defined_in_pxd and not definition:
+ storage_class = "static"
+ dll_linkage = None
+ type = CPtrType(type)
+
+ header = type.declaration_code(
+ entry.cname, dll_linkage=dll_linkage)
+ modifiers = code.build_function_modifiers(entry.func_modifiers)
+ code.putln("%s %s%s; /*proto*/" % (
+ storage_class,
+ modifiers,
+ header))
+
+#------------------------------------------------------------------------------------
+#
+# Runtime support code
+#
+#------------------------------------------------------------------------------------
+
+refnanny_utility_code = UtilityCode.load("Refnanny", "ModuleSetupCode.c")
+
+packed_struct_utility_code = UtilityCode(proto="""
+#if defined(__GNUC__)
+#define __Pyx_PACKED __attribute__((__packed__))
+#else
+#define __Pyx_PACKED
+#endif
+""", impl="", proto_block='utility_code_proto_before_types')
+
+capsule_utility_code = UtilityCode.load("Capsule")
diff --git a/contrib/tools/cython/Cython/Compiler/Naming.py b/contrib/tools/cython/Cython/Compiler/Naming.py
new file mode 100644
index 0000000000..2c9b620788
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Naming.py
@@ -0,0 +1,162 @@
+#
+# C naming conventions
+#
+#
+# Prefixes for generating C names.
+# Collected here to facilitate ensuring uniqueness.
+#
+
+pyrex_prefix = "__pyx_"
+
+
+codewriter_temp_prefix = pyrex_prefix + "t_"
+
+temp_prefix = u"__cyt_"
+
+builtin_prefix = pyrex_prefix + "builtin_"
+arg_prefix = pyrex_prefix + "arg_"
+funcdoc_prefix = pyrex_prefix + "doc_"
+enum_prefix = pyrex_prefix + "e_"
+func_prefix = pyrex_prefix + "f_"
+func_prefix_api = pyrex_prefix + "api_f_"
+pyfunc_prefix = pyrex_prefix + "pf_"
+pywrap_prefix = pyrex_prefix + "pw_"
+genbody_prefix = pyrex_prefix + "gb_"
+gstab_prefix = pyrex_prefix + "getsets_"
+prop_get_prefix = pyrex_prefix + "getprop_"
+const_prefix = pyrex_prefix + "k_"
+py_const_prefix = pyrex_prefix + "kp_"
+label_prefix = pyrex_prefix + "L"
+pymethdef_prefix = pyrex_prefix + "mdef_"
+method_wrapper_prefix = pyrex_prefix + "specialmethod_"
+methtab_prefix = pyrex_prefix + "methods_"
+memtab_prefix = pyrex_prefix + "members_"
+objstruct_prefix = pyrex_prefix + "obj_"
+typeptr_prefix = pyrex_prefix + "ptype_"
+prop_set_prefix = pyrex_prefix + "setprop_"
+type_prefix = pyrex_prefix + "t_"
+typeobj_prefix = pyrex_prefix + "type_"
+var_prefix = pyrex_prefix + "v_"
+varptr_prefix = pyrex_prefix + "vp_"
+varptr_prefix_api = pyrex_prefix + "api_vp_"
+ wrapperbase_prefix = pyrex_prefix + "wrapperbase_"
+pybuffernd_prefix = pyrex_prefix + "pybuffernd_"
+pybufferstruct_prefix = pyrex_prefix + "pybuffer_"
+vtable_prefix = pyrex_prefix + "vtable_"
+vtabptr_prefix = pyrex_prefix + "vtabptr_"
+vtabstruct_prefix = pyrex_prefix + "vtabstruct_"
+opt_arg_prefix = pyrex_prefix + "opt_args_"
+convert_func_prefix = pyrex_prefix + "convert_"
+closure_scope_prefix = pyrex_prefix + "scope_"
+closure_class_prefix = pyrex_prefix + "scope_struct_"
+lambda_func_prefix = pyrex_prefix + "lambda_"
+module_is_main = pyrex_prefix + "module_is_main_"
+defaults_struct_prefix = pyrex_prefix + "defaults"
+dynamic_args_cname = pyrex_prefix + "dynamic_args"
+
+interned_prefixes = {
+ 'str': pyrex_prefix + "n_",
+ 'int': pyrex_prefix + "int_",
+ 'float': pyrex_prefix + "float_",
+ 'tuple': pyrex_prefix + "tuple_",
+ 'codeobj': pyrex_prefix + "codeobj_",
+ 'slice': pyrex_prefix + "slice_",
+ 'ustring': pyrex_prefix + "ustring_",
+ 'umethod': pyrex_prefix + "umethod_",
+}
+
+ctuple_type_prefix = pyrex_prefix + "ctuple_"
+args_cname = pyrex_prefix + "args"
+generator_cname = pyrex_prefix + "generator"
+sent_value_cname = pyrex_prefix + "sent_value"
+pykwdlist_cname = pyrex_prefix + "pyargnames"
+obj_base_cname = pyrex_prefix + "base"
+builtins_cname = pyrex_prefix + "b"
+preimport_cname = pyrex_prefix + "i"
+moddict_cname = pyrex_prefix + "d"
+dummy_cname = pyrex_prefix + "dummy"
+filename_cname = pyrex_prefix + "filename"
+modulename_cname = pyrex_prefix + "modulename"
+filetable_cname = pyrex_prefix + "f"
+intern_tab_cname = pyrex_prefix + "intern_tab"
+kwds_cname = pyrex_prefix + "kwds"
+lineno_cname = pyrex_prefix + "lineno"
+clineno_cname = pyrex_prefix + "clineno"
+cfilenm_cname = pyrex_prefix + "cfilenm"
+local_tstate_cname = pyrex_prefix + "tstate"
+module_cname = pyrex_prefix + "m"
+moddoc_cname = pyrex_prefix + "mdoc"
+methtable_cname = pyrex_prefix + "methods"
+retval_cname = pyrex_prefix + "r"
+reqd_kwds_cname = pyrex_prefix + "reqd_kwds"
+self_cname = pyrex_prefix + "self"
+stringtab_cname = pyrex_prefix + "string_tab"
+vtabslot_cname = pyrex_prefix + "vtab"
+c_api_tab_cname = pyrex_prefix + "c_api_tab"
+gilstate_cname = pyrex_prefix + "state"
+skip_dispatch_cname = pyrex_prefix + "skip_dispatch"
+empty_tuple = pyrex_prefix + "empty_tuple"
+empty_bytes = pyrex_prefix + "empty_bytes"
+empty_unicode = pyrex_prefix + "empty_unicode"
+print_function = pyrex_prefix + "print"
+print_function_kwargs = pyrex_prefix + "print_kwargs"
+cleanup_cname = pyrex_prefix + "module_cleanup"
+pymoduledef_cname = pyrex_prefix + "moduledef"
+pymoduledef_slots_cname = pyrex_prefix + "moduledef_slots"
+pymodinit_module_arg = pyrex_prefix + "pyinit_module"
+pymodule_create_func_cname = pyrex_prefix + "pymod_create"
+pymodule_exec_func_cname = pyrex_prefix + "pymod_exec"
+optional_args_cname = pyrex_prefix + "optional_args"
+import_star = pyrex_prefix + "import_star"
+import_star_set = pyrex_prefix + "import_star_set"
+ outer_scope_cname = pyrex_prefix + "outer_scope"
+cur_scope_cname = pyrex_prefix + "cur_scope"
+enc_scope_cname = pyrex_prefix + "enc_scope"
+frame_cname = pyrex_prefix + "frame"
+frame_code_cname = pyrex_prefix + "frame_code"
+binding_cfunc = pyrex_prefix + "binding_PyCFunctionType"
+fused_func_prefix = pyrex_prefix + 'fuse_'
+quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty temping
+tp_dict_version_temp = pyrex_prefix + "tp_dict_version"
+obj_dict_version_temp = pyrex_prefix + "obj_dict_version"
+type_dict_guard_temp = pyrex_prefix + "type_dict_guard"
+cython_runtime_cname = pyrex_prefix + "cython_runtime"
+
+global_code_object_cache_find = pyrex_prefix + 'find_code_object'
+global_code_object_cache_insert = pyrex_prefix + 'insert_code_object'
+
+genexpr_id_ref = 'genexpr'
+freelist_name = 'freelist'
+freecount_name = 'freecount'
+
+line_c_macro = "__LINE__"
+
+file_c_macro = "__FILE__"
+
+extern_c_macro = pyrex_prefix.upper() + "EXTERN_C"
+
+exc_type_name = pyrex_prefix + "exc_type"
+exc_value_name = pyrex_prefix + "exc_value"
+exc_tb_name = pyrex_prefix + "exc_tb"
+exc_lineno_name = pyrex_prefix + "exc_lineno"
+
+parallel_exc_type = pyrex_prefix + "parallel_exc_type"
+parallel_exc_value = pyrex_prefix + "parallel_exc_value"
+parallel_exc_tb = pyrex_prefix + "parallel_exc_tb"
+parallel_filename = pyrex_prefix + "parallel_filename"
+parallel_lineno = pyrex_prefix + "parallel_lineno"
+parallel_clineno = pyrex_prefix + "parallel_clineno"
+parallel_why = pyrex_prefix + "parallel_why"
+
+exc_vars = (exc_type_name, exc_value_name, exc_tb_name)
+
+api_name = pyrex_prefix + "capi__"
+
+h_guard_prefix = "__PYX_HAVE__"
+api_guard_prefix = "__PYX_HAVE_API__"
+api_func_guard = "__PYX_HAVE_API_FUNC_"
+
+PYX_NAN = "__PYX_NAN()"
+
+def py_version_hex(major, minor=0, micro=0, release_level=0, release_serial=0):
+ return (major << 24) | (minor << 16) | (micro << 8) | (release_level << 4) | (release_serial)
diff --git a/contrib/tools/cython/Cython/Compiler/Nodes.py b/contrib/tools/cython/Cython/Compiler/Nodes.py
new file mode 100644
index 0000000000..f57ddf3dd3
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Nodes.py
@@ -0,0 +1,9456 @@
+#
+# Parse tree nodes
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(sys=object, os=object, copy=object,
+ Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
+ py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
+ StructOrUnionScope=object, PyClassScope=object,
+ CppClassScope=object, UtilityCode=object, EncodedString=object,
+ error_type=object, _py_int_types=object)
+
+import sys, os, copy
+from itertools import chain
+
+from . import Builtin
+from .Errors import error, warning, InternalError, CompileError
+from . import Naming
+from . import PyrexTypes
+from . import TypeSlots
+from .PyrexTypes import py_object_type, error_type
+from .Symtab import (ModuleScope, LocalScope, ClosureScope,
+ StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
+from .Code import UtilityCode
+from .StringEncoding import EncodedString
+from . import Future
+from . import Options
+from . import DebugFlags
+from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
+from ..Utils import add_metaclass
+
+
+if sys.version_info[0] >= 3:
+ _py_int_types = int
+else:
+ _py_int_types = (int, long)
+
+
+def relative_position(pos):
+ return (pos[0].get_filenametable_entry(), pos[1])
+
+
+def embed_position(pos, docstring):
+ if not Options.embed_pos_in_docstring:
+ return docstring
+ pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
+ if docstring is None:
+ # unicode string
+ return EncodedString(pos_line)
+
+ # make sure we can encode the filename in the docstring encoding
+ # otherwise make the docstring a unicode string
+ encoding = docstring.encoding
+ if encoding is not None:
+ try:
+ pos_line.encode(encoding)
+ except UnicodeEncodeError:
+ encoding = None
+
+ if not docstring:
+ # reuse the string encoding of the original docstring
+ doc = EncodedString(pos_line)
+ else:
+ doc = EncodedString(pos_line + u'\n' + docstring)
+ doc.encoding = encoding
+ return doc
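+
+ # Example (assuming Options.embed_pos_in_docstring is enabled and a
+ # hypothetical file "spam.pyx", line 12): a function without a docstring
+ # gets u'File: spam.pyx (starting at line 12)' as its docstring; an
+ # existing docstring has that line prepended, separated by a newline.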
+
+
+def analyse_type_annotation(annotation, env, assigned_value=None):
+ base_type = None
+ is_ambiguous = False
+ explicit_pytype = explicit_ctype = False
+ if annotation.is_dict_literal:
+ warning(annotation.pos,
+ "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
+ for name, value in annotation.key_value_pairs:
+ if not name.is_string_literal:
+ continue
+ if name.value in ('type', b'type'):
+ explicit_pytype = True
+ if not explicit_ctype:
+ annotation = value
+ elif name.value in ('ctype', b'ctype'):
+ explicit_ctype = True
+ annotation = value
+ if explicit_pytype and explicit_ctype:
+ warning(annotation.pos, "Duplicate type declarations found in signature annotation")
+ arg_type = annotation.analyse_as_type(env)
+ if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
+ # Map builtin numeric Python types to C types in safe cases.
+ if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
+ assigned_type = assigned_value.infer_type(env)
+ if assigned_type and assigned_type.is_pyobject:
+ # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
+ is_ambiguous = True
+ arg_type = None
+ # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
+ if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
+ arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
+ elif arg_type is not None and annotation.is_string_literal:
+ warning(annotation.pos,
+ "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
+ if arg_type is not None:
+ if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
+ warning(annotation.pos,
+ "Python type declaration in signature annotation does not refer to a Python type")
+ base_type = CAnalysedBaseTypeNode(
+ annotation.pos, type=arg_type, is_arg=True)
+ elif is_ambiguous:
+ warning(annotation.pos, "Ambiguous types in annotation, ignoring")
+ else:
+ warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
+ return base_type, arg_type
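+
+ # Behaviour sketch for common annotation forms (argument names hypothetical):
+ # def f(x: cython.int): ... -> x is declared as C int
+ # def f(x: float): ... -> x is declared as C double (special case above)
+ # def f(x: int): ... -> x stays a Python object ('int' alone is unsafe)
+ # The legacy dict ({'type': ...}) and string forms still work but emit
+ # deprecation warnings.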
+
+
+def write_func_call(func, codewriter_class):
+ def f(*args, **kwds):
+ if len(args) > 1 and isinstance(args[1], codewriter_class):
+ # here we annotate the code with this function call
+ # but only if new code is generated
+ node, code = args[:2]
+ marker = ' /* %s -> %s.%s %s */' % (
+ ' ' * code.call_level,
+ node.__class__.__name__,
+ func.__name__,
+ node.pos[1:])
+ pristine = code.buffer.stream.tell()
+ code.putln(marker)
+ start = code.buffer.stream.tell()
+ code.call_level += 4
+ res = func(*args, **kwds)
+ code.call_level -= 4
+ if start == code.buffer.stream.tell():
+ # no code written => undo writing marker
+ code.buffer.stream.truncate(pristine)
+ else:
+ marker = marker.replace('->', '<-', 1)
+ code.putln(marker)
+ return res
+ else:
+ return func(*args, **kwds)
+ return f
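+
+ # With the VerboseCodeWriter metaclass below, generated C ends up bracketed
+ # by markers of the form (positions illustrative):
+ # /* -> FuncDefNode.generate_execution_code (12, 0) */
+ # ...generated code...
+ # /* <- FuncDefNode.generate_execution_code (12, 0) */
+ # The marker pair is dropped again when the wrapped call emits no code.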
+
+
+class VerboseCodeWriter(type):
+ # Set this as a metaclass to trace function calls in code.
+ # This slows down code generation and makes the generated files much larger.
+ def __new__(cls, name, bases, attrs):
+ from types import FunctionType
+ from .Code import CCodeWriter
+ attrs = dict(attrs)
+ for mname, m in attrs.items():
+ if isinstance(m, FunctionType):
+ attrs[mname] = write_func_call(m, CCodeWriter)
+ return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
+
+
+class CheckAnalysers(type):
+ """Metaclass to check that type analysis functions return a node.
+ """
+ methods = set(['analyse_types',
+ 'analyse_expressions',
+ 'analyse_target_types'])
+
+ def __new__(cls, name, bases, attrs):
+ from types import FunctionType
+ def check(name, func):
+ def call(*args, **kwargs):
+ retval = func(*args, **kwargs)
+ if retval is None:
+ print('%s %s %s' % (name, args, kwargs))
+ return retval
+ return call
+
+ attrs = dict(attrs)
+ for mname, m in attrs.items():
+ if isinstance(m, FunctionType) and mname in cls.methods:
+ attrs[mname] = check(mname, m)
+ return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
+
+
+def _with_metaclass(cls):
+ if DebugFlags.debug_trace_code_generation:
+ return add_metaclass(VerboseCodeWriter)(cls)
+ #return add_metaclass(CheckAnalysers)(cls)
+ return cls
+
+
+@_with_metaclass
+class Node(object):
+ # pos (string, int, int) Source file position
+ # is_name boolean Is a NameNode
+ # is_literal boolean Is a ConstNode
+
+ is_name = 0
+ is_none = 0
+ is_nonecheck = 0
+ is_literal = 0
+ is_terminator = 0
+ is_wrapper = False # is a DefNode wrapper for a C function
+ temps = None
+
+ # All descendants should set child_attrs to a list of the attributes
+ # containing nodes considered "children" in the tree. Each such attribute
+ # can either contain a single node or a list of nodes. See Visitor.py.
+ child_attrs = None
+
+ # Subset of attributes that are evaluated in the outer scope (e.g. function default arguments).
+ outer_attrs = None
+
+ cf_state = None
+
+ # This may be an additional (or 'actual') type that will be checked when
+ # this node is coerced to another type. It can be useful to set this when
+ # the actual type to which the node can coerce is known, but you want to
+ # leave the declared type as py_object_type.
+ coercion_type = None
+
+ def __init__(self, pos, **kw):
+ self.pos = pos
+ self.__dict__.update(kw)
+
+ gil_message = "Operation"
+
+ nogil_check = None
+ in_nogil_context = False # For use only during code generation.
+
+ def gil_error(self, env=None):
+ error(self.pos, "%s not allowed without gil" % self.gil_message)
+
+ cpp_message = "Operation"
+
+ def cpp_check(self, env):
+ if not env.is_cpp():
+ self.cpp_error()
+
+ def cpp_error(self):
+ error(self.pos, "%s only allowed in c++" % self.cpp_message)
+
+ def clone_node(self):
+ """Clone the node. This is defined as a shallow copy, except for member lists
+ amongst the child attributes (from get_child_accessors) which are also
+ copied. Lists containing child nodes are thus seen as a way for the node
+ to hold multiple children directly; the list is not treated as a separate
+ level in the tree."""
+ result = copy.copy(self)
+ for attrname in result.child_attrs:
+ value = getattr(result, attrname)
+ if isinstance(value, list):
+ setattr(result, attrname, [x for x in value])
+ return result
+
+
+ #
+ # There are 3 phases of parse tree processing, applied in order to
+ # all the statements in a given scope-block:
+ #
+ # (0) analyse_declarations
+ # Make symbol table entries for all declarations at the current
+ # level, both explicit (def, cdef, etc.) and implicit (assignment
+ # to an otherwise undeclared name).
+ #
+ # (1) analyse_expressions
+ # Determine the result types of expressions and fill in the
+ # 'type' attribute of each ExprNode. Insert coercion nodes into the
+ # tree where needed to convert to and from Python objects.
+ # Allocate temporary locals for intermediate results. Fill
+ # in the 'result_code' attribute of each ExprNode with a C code
+ # fragment.
+ #
+ # (2) generate_code
+ # Emit C code for all declarations, statements and expressions.
+ # Recursively applies the 3 processing phases to the bodies of
+ # functions.
+ #
+
+ def analyse_declarations(self, env):
+ pass
+
+ def analyse_expressions(self, env):
+ raise InternalError("analyse_expressions not implemented for %s" % \
+ self.__class__.__name__)
+
+ def generate_code(self, code):
+ raise InternalError("generate_code not implemented for %s" % \
+ self.__class__.__name__)
+
+ def annotate(self, code):
+ # mro does the wrong thing
+ if isinstance(self, BlockNode):
+ self.body.annotate(code)
+
+ def end_pos(self):
+ try:
+ return self._end_pos
+ except AttributeError:
+ pos = self.pos
+ if not self.child_attrs:
+ self._end_pos = pos
+ return pos
+ for attr in self.child_attrs:
+ child = getattr(self, attr)
+ # Sometimes lists, sometimes nodes
+ if child is None:
+ pass
+ elif isinstance(child, list):
+ for c in child:
+ pos = max(pos, c.end_pos())
+ else:
+ pos = max(pos, child.end_pos())
+ self._end_pos = pos
+ return pos
+
+ def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
+ """Debug helper method that returns a recursive string representation of this node.
+ """
+ if cutoff == 0:
+ return "<...nesting level cutoff...>"
+ if encountered is None:
+ encountered = set()
+ if id(self) in encountered:
+ return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
+ encountered.add(id(self))
+
+ def dump_child(x, level):
+ if isinstance(x, Node):
+ return x.dump(level, filter_out, cutoff-1, encountered)
+ elif isinstance(x, list):
+ return "[%s]" % ", ".join([dump_child(item, level) for item in x])
+ else:
+ return repr(x)
+
+ attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
+ if len(attrs) == 0:
+ return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
+ else:
+ indent = " " * level
+ res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
+ for key, value in attrs:
+ res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
+ res += "%s>" % indent
+ return res
+
+ def dump_pos(self, mark_column=False, marker='(#)'):
+ """Debug helper method that returns the source code context of this node as a string.
+ """
+ if not self.pos:
+ return u''
+ source_desc, line, col = self.pos
+ contents = source_desc.get_lines(encoding='ASCII', error_handling='ignore')
+ # line numbers start at 1
+ lines = contents[max(0, line-3):line]
+ current = lines[-1]
+ if mark_column:
+ current = current[:col] + marker + current[col:]
+ lines[-1] = current.rstrip() + u' # <<<<<<<<<<<<<<\n'
+ lines += contents[line:line+2]
+ return u'"%s":%d:%d\n%s\n' % (
+ source_desc.get_escaped_description(), line, col, u''.join(lines))
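+
+ # Illustrative output (hypothetical file and position), as produced by the
+ # format string above:
+ # "spam.pyx":12:4
+ # <up to two preceding source lines>
+ # offending line # <<<<<<<<<<<<<<
+ # <up to two following source lines>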
+
+class CompilerDirectivesNode(Node):
+ """
+ Sets compiler directives for the children nodes
+ """
+ # directives {string:value} A dictionary holding the right value for
+ # *all* possible directives.
+ # body Node
+ child_attrs = ["body"]
+
+ def analyse_declarations(self, env):
+ old = env.directives
+ env.directives = self.directives
+ self.body.analyse_declarations(env)
+ env.directives = old
+
+ def analyse_expressions(self, env):
+ old = env.directives
+ env.directives = self.directives
+ self.body = self.body.analyse_expressions(env)
+ env.directives = old
+ return self
+
+ def generate_function_definitions(self, env, code):
+ env_old = env.directives
+ code_old = code.globalstate.directives
+ code.globalstate.directives = self.directives
+ self.body.generate_function_definitions(env, code)
+ env.directives = env_old
+ code.globalstate.directives = code_old
+
+ def generate_execution_code(self, code):
+ old = code.globalstate.directives
+ code.globalstate.directives = self.directives
+ self.body.generate_execution_code(code)
+ code.globalstate.directives = old
+
+ def annotate(self, code):
+ old = code.globalstate.directives
+ code.globalstate.directives = self.directives
+ self.body.annotate(code)
+ code.globalstate.directives = old
+
+class BlockNode(object):
+ # Mixin class for nodes representing a declaration block.
+
+ def generate_cached_builtins_decls(self, env, code):
+ entries = env.global_scope().undeclared_cached_builtins
+ for entry in entries:
+ code.globalstate.add_cached_builtin_decl(entry)
+ del entries[:]
+
+ def generate_lambda_definitions(self, env, code):
+ for node in env.lambda_defs:
+ node.generate_function_definitions(env, code)
+
+class StatListNode(Node):
+ # stats a list of StatNode
+
+ child_attrs = ["stats"]
+
+ @staticmethod
+ def create_analysed(pos, env, *args, **kw):
+ node = StatListNode(pos, *args, **kw)
+ return node # No node-specific analysis needed
+
+ def analyse_declarations(self, env):
+ #print "StatListNode.analyse_declarations" ###
+ for stat in self.stats:
+ stat.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ #print "StatListNode.analyse_expressions" ###
+ self.stats = [stat.analyse_expressions(env)
+ for stat in self.stats]
+ return self
+
+ def generate_function_definitions(self, env, code):
+ #print "StatListNode.generate_function_definitions" ###
+ for stat in self.stats:
+ stat.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ #print "StatListNode.generate_execution_code" ###
+ for stat in self.stats:
+ code.mark_pos(stat.pos)
+ stat.generate_execution_code(code)
+
+ def annotate(self, code):
+ for stat in self.stats:
+ stat.annotate(code)
+
+
+class StatNode(Node):
+ #
+ # Code generation for statements is split into the following subphases:
+ #
+ # (1) generate_function_definitions
+ # Emit C code for the definitions of any structs,
+ # unions, enums and functions defined in the current
+ # scope-block.
+ #
+ # (2) generate_execution_code
+ # Emit C code for executable statements.
+ #
+
+ def generate_function_definitions(self, env, code):
+ pass
+
+ def generate_execution_code(self, code):
+ raise InternalError("generate_execution_code not implemented for %s" % \
+ self.__class__.__name__)
+
+
+class CDefExternNode(StatNode):
+ # include_file string or None
+ # verbatim_include string or None
+ # body StatListNode
+
+ child_attrs = ["body"]
+
+ def analyse_declarations(self, env):
+ old_cinclude_flag = env.in_cinclude
+ env.in_cinclude = 1
+ self.body.analyse_declarations(env)
+ env.in_cinclude = old_cinclude_flag
+
+ if self.include_file or self.verbatim_include:
+ # Determine whether include should be late
+ stats = self.body.stats
+ if not env.directives['preliminary_late_includes_cy28']:
+ late = False
+ elif not stats:
+ # Special case: empty 'cdef extern' blocks are early
+ late = False
+ else:
+ late = all(isinstance(node, CVarDefNode) for node in stats)
+ env.add_include_file(self.include_file, self.verbatim_include, late)
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+ def annotate(self, code):
+ self.body.annotate(code)
+
+
+class CDeclaratorNode(Node):
+ # Part of a C declaration.
+ #
+ # Processing during analyse_declarations phase:
+ #
+ # analyse
+ # Returns (name, type) pair where name is the
+ # CNameDeclaratorNode of the name being declared
+ # and type is the type it is being declared as.
+ #
+ # calling_convention string Calling convention of CFuncDeclaratorNode
+ # for which this is a base
+
+ child_attrs = []
+
+ calling_convention = ""
+
+ def analyse_templates(self):
+ # Only C++ functions have templates.
+ return None
+
+
+class CNameDeclaratorNode(CDeclaratorNode):
+ # name string The Cython name being declared
+ # cname string or None C name, if specified
+ # default ExprNode or None the value assigned on declaration
+
+ child_attrs = ['default']
+
+ default = None
+
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if nonempty and self.name == '':
+ # May have mistaken the name for the type.
+ if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
+ error(self.pos, "Missing argument name")
+ elif base_type.is_void:
+ error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
+ else:
+ self.name = base_type.declaration_code("", for_display=1, pyrex=1)
+ base_type = py_object_type
+
+ if base_type.is_fused and env.fused_to_specific:
+ base_type = base_type.specialize(env.fused_to_specific)
+
+ self.type = base_type
+ return self, base_type
+
+
+class CPtrDeclaratorNode(CDeclaratorNode):
+ # base CDeclaratorNode
+
+ child_attrs = ["base"]
+
+ def analyse_templates(self):
+ return self.base.analyse_templates()
+
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if base_type.is_pyobject:
+ error(self.pos, "Pointer base type cannot be a Python object")
+ ptr_type = PyrexTypes.c_ptr_type(base_type)
+ return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+
+
+class CReferenceDeclaratorNode(CDeclaratorNode):
+ # base CDeclaratorNode
+
+ child_attrs = ["base"]
+
+ def analyse_templates(self):
+ return self.base.analyse_templates()
+
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if base_type.is_pyobject:
+ error(self.pos, "Reference base type cannot be a Python object")
+ ref_type = PyrexTypes.c_ref_type(base_type)
+ return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+
+
+class CArrayDeclaratorNode(CDeclaratorNode):
+ # base CDeclaratorNode
+ # dimension ExprNode
+
+ child_attrs = ["base", "dimension"]
+
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
+ from .ExprNodes import TupleNode
+ if isinstance(self.dimension, TupleNode):
+ args = self.dimension.args
+ else:
+ args = (self.dimension,)
+ values = [v.analyse_as_type(env) for v in args]
+ if None in values:
+ ix = values.index(None)
+ error(args[ix].pos, "Template parameter not a type")
+ base_type = error_type
+ else:
+ base_type = base_type.specialize_here(self.pos, values)
+ return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ if self.dimension:
+ self.dimension = self.dimension.analyse_const_expression(env)
+ if not self.dimension.type.is_int:
+ error(self.dimension.pos, "Array dimension not integer")
+ size = self.dimension.get_constant_c_result_code()
+ if size is not None:
+ try:
+ size = int(size)
+ except ValueError:
+ # runtime constant?
+ pass
+ else:
+ size = None
+ if not base_type.is_complete():
+ error(self.pos, "Array element type '%s' is incomplete" % base_type)
+ if base_type.is_pyobject:
+ error(self.pos, "Array element cannot be a Python object")
+ if base_type.is_cfunction:
+ error(self.pos, "Array element cannot be a function")
+ array_type = PyrexTypes.c_array_type(base_type, size)
+ return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+
+
+class CFuncDeclaratorNode(CDeclaratorNode):
+ # base CDeclaratorNode
+ # args [CArgDeclNode]
+ # templates [TemplatePlaceholderType]
+ # has_varargs boolean
+ # exception_value ConstNode
+ # exception_check boolean True if PyErr_Occurred check needed
+ # nogil boolean Can be called without gil
+ # with_gil boolean Acquire gil around function body
+ # is_const_method boolean Whether this is a const method
+
+ child_attrs = ["base", "args", "exception_value"]
+
+ overridable = 0
+ optional_arg_count = 0
+ is_const_method = 0
+ templates = None
+
+ def analyse_templates(self):
+ if isinstance(self.base, CArrayDeclaratorNode):
+ from .ExprNodes import TupleNode, NameNode
+ template_node = self.base.dimension
+ if isinstance(template_node, TupleNode):
+ template_nodes = template_node.args
+ elif isinstance(template_node, NameNode):
+ template_nodes = [template_node]
+ else:
+ error(template_node.pos, "Template arguments must be a list of names")
+ return None
+ self.templates = []
+ for template in template_nodes:
+ if isinstance(template, NameNode):
+ self.templates.append(PyrexTypes.TemplatePlaceholderType(template.name))
+ else:
+ error(template.pos, "Template arguments must be a list of names")
+ self.base = self.base.base
+ return self.templates
+ else:
+ return None
+
+ def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
+ if directive_locals is None:
+ directive_locals = {}
+ if nonempty:
+ nonempty -= 1
+ func_type_args = []
+ for i, arg_node in enumerate(self.args):
+ name_declarator, type = arg_node.analyse(
+ env, nonempty=nonempty,
+ is_self_arg=(i == 0 and env.is_c_class_scope and 'staticmethod' not in env.directives))
+ name = name_declarator.name
+ if name in directive_locals:
+ type_node = directive_locals[name]
+ other_type = type_node.analyse_as_type(env)
+ if other_type is None:
+ error(type_node.pos, "Not a type")
+ elif (type is not PyrexTypes.py_object_type
+ and not type.same_as(other_type)):
+ error(self.base.pos, "Signature does not agree with previous declaration")
+ error(type_node.pos, "Previous declaration here")
+ else:
+ type = other_type
+ if name_declarator.cname:
+ error(self.pos, "Function argument cannot have C name specification")
+ if i == 0 and env.is_c_class_scope and type.is_unspecified:
+ # fix the type of self
+ type = env.parent_type
+ # Turn *[] argument into **
+ if type.is_array:
+ type = PyrexTypes.c_ptr_type(type.base_type)
+ # Catch attempted C-style func(void) decl
+ if type.is_void:
+ error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
+ func_type_args.append(
+ PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
+ if arg_node.default:
+ self.optional_arg_count += 1
+ elif self.optional_arg_count:
+ error(self.pos, "Non-default argument follows default argument")
+
+ exc_val = None
+ exc_check = 0
+ if self.exception_check == '+':
+ env.add_include_file('ios') # for std::ios_base::failure
+ env.add_include_file('new') # for std::bad_alloc
+ env.add_include_file('stdexcept')
+ env.add_include_file('typeinfo') # for std::bad_cast
+ if (return_type.is_pyobject
+ and (self.exception_value or self.exception_check)
+ and self.exception_check != '+'):
+ error(self.pos, "Exception clause not allowed for function returning Python object")
+ else:
+ if self.exception_value is None and self.exception_check and self.exception_check != '+':
+ # Use an explicit exception return value to speed up exception checks.
+ # Even if it is not declared, we can use the default exception value of the return type,
+ # unless the function is some kind of external function that we do not control.
+ if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
+ # Extension types are more difficult because the signature must match the base type signature.
+ if not env.is_c_class_scope:
+ from .ExprNodes import ConstNode
+ self.exception_value = ConstNode(
+ self.pos, value=return_type.exception_value, type=return_type)
+ if self.exception_value:
+ self.exception_value = self.exception_value.analyse_const_expression(env)
+ if self.exception_check == '+':
+ exc_val_type = self.exception_value.type
+ if (not exc_val_type.is_error
+ and not exc_val_type.is_pyobject
+ and not (exc_val_type.is_cfunction
+ and not exc_val_type.return_type.is_pyobject
+ and not exc_val_type.args)
+ and not (exc_val_type == PyrexTypes.c_char_type
+ and self.exception_value.value == '*')):
+ error(self.exception_value.pos,
+ "Exception value must be a Python exception or cdef function with no arguments or *.")
+ exc_val = self.exception_value
+ else:
+ self.exception_value = self.exception_value.coerce_to(
+ return_type, env).analyse_const_expression(env)
+ exc_val = self.exception_value.get_constant_c_result_code()
+ if exc_val is None:
+ raise InternalError(
+ "get_constant_c_result_code not implemented for %s" %
+ self.exception_value.__class__.__name__)
+ if not return_type.assignable_from(self.exception_value.type):
+ error(self.exception_value.pos,
+ "Exception value incompatible with function return type")
+ exc_check = self.exception_check
+ if return_type.is_cfunction:
+ error(self.pos, "Function cannot return a function")
+ func_type = PyrexTypes.CFuncType(
+ return_type, func_type_args, self.has_varargs,
+ optional_arg_count=self.optional_arg_count,
+ exception_value=exc_val, exception_check=exc_check,
+ calling_convention=self.base.calling_convention,
+ nogil=self.nogil, with_gil=self.with_gil, is_overridable=self.overridable,
+ is_const_method=self.is_const_method,
+ templates=self.templates)
+
+ if self.optional_arg_count:
+ if func_type.is_fused:
+ # This is a bit of a hack... When we need to create specialized CFuncTypes
+ # on the fly because the cdef is defined in a pxd, we need to declare the specialized optional arg
+ # struct
+ def declare_opt_arg_struct(func_type, fused_cname):
+ self.declare_optional_arg_struct(func_type, env, fused_cname)
+
+ func_type.declare_opt_arg_struct = declare_opt_arg_struct
+ else:
+ self.declare_optional_arg_struct(func_type, env)
+
+ callspec = env.directives['callspec']
+ if callspec:
+ current = func_type.calling_convention
+ if current and current != callspec:
+ error(self.pos, "cannot have both '%s' and '%s' "
+ "calling conventions" % (current, callspec))
+ func_type.calling_convention = callspec
+ return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
+
+ def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
+ """
+ Declares the optional argument struct (the struct used to hold the
+ values for optional arguments). For fused cdef functions, this is
+ deferred as analyse_declarations is called only once (on the fused
+ cdef function).
+ """
+ scope = StructOrUnionScope()
+ arg_count_member = '%sn' % Naming.pyrex_prefix
+ scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
+
+ for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
+ scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
+
+ struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
+
+ if fused_cname is not None:
+ struct_cname = PyrexTypes.get_fused_cname(fused_cname, struct_cname)
+
+ op_args_struct = env.global_scope().declare_struct_or_union(
+ name=struct_cname,
+ kind='struct',
+ scope=scope,
+ typedef_flag=0,
+ pos=self.pos,
+ cname=struct_cname)
+
+ op_args_struct.defined_in_pxd = 1
+ op_args_struct.used = 1
+
+ func_type.op_arg_struct = PyrexTypes.c_ptr_type(op_args_struct.type)
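+
+ # Shape of the declared struct, for illustration only (the mangled name
+ # and the member 'x' are hypothetical):
+ # struct __pyx_opt_args_4spam_f {
+ # int __pyx_n; /* number of optional args actually passed */
+ # int x; /* one member per optional argument */
+ # };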
+
+
+class CConstDeclaratorNode(CDeclaratorNode):
+ # base CDeclaratorNode
+
+ child_attrs = ["base"]
+
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ if base_type.is_pyobject:
+ error(self.pos,
+ "Const base type cannot be a Python object")
+ const = PyrexTypes.c_const_type(base_type)
+ return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+
+
+class CArgDeclNode(Node):
+ # Item in a function declaration argument list.
+ #
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+ # not_none boolean Tagged with 'not None'
+ # or_none boolean Tagged with 'or None'
+ # accept_none boolean Resolved boolean for not_none/or_none
+ # default ExprNode or None
+ # default_value PyObjectConst constant for default value
+ # annotation ExprNode or None Py3 function arg annotation
+ # is_self_arg boolean Is the "self" arg of an extension type method
+ # is_type_arg boolean Is the "class" arg of an extension type classmethod
+ # is_kw_only boolean Is a keyword-only argument
+ # is_dynamic boolean Non-literal arg stored inside CyFunction
+
+ child_attrs = ["base_type", "declarator", "default", "annotation"]
+ outer_attrs = ["default", "annotation"]
+
+ is_self_arg = 0
+ is_type_arg = 0
+ is_generic = 1
+ kw_only = 0
+ not_none = 0
+ or_none = 0
+ type = None
+ name_declarator = None
+ default_value = None
+ annotation = None
+ is_dynamic = 0
+
+ def analyse(self, env, nonempty=0, is_self_arg=False):
+ if is_self_arg:
+ self.base_type.is_self_arg = self.is_self_arg = True
+ if self.type is None:
+ # The parser may misinterpret names as types. We fix that here.
+ if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
+ if nonempty:
+ if self.base_type.is_basic_c_type:
+ # char, short, long called "int"
+ type = self.base_type.analyse(env, could_be_name=True)
+ arg_name = type.empty_declaration_code()
+ else:
+ arg_name = self.base_type.name
+ self.declarator.name = EncodedString(arg_name)
+ self.base_type.name = None
+ self.base_type.is_basic_c_type = False
+ could_be_name = True
+ else:
+ could_be_name = False
+ self.base_type.is_arg = True
+ base_type = self.base_type.analyse(env, could_be_name=could_be_name)
+ if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
+ self.declarator.name = self.base_type.arg_name
+
+ # The parser is unable to resolve the ambiguity of [] as part of the
+ # type (e.g. in buffers) or empty declarator (as with arrays).
+ # This only arises for empty multi-dimensional arrays.
+ if (base_type.is_array
+ and isinstance(self.base_type, TemplatedTypeNode)
+ and isinstance(self.declarator, CArrayDeclaratorNode)):
+ declarator = self.declarator
+ while isinstance(declarator.base, CArrayDeclaratorNode):
+ declarator = declarator.base
+ declarator.base = self.base_type.array_declarator
+ base_type = base_type.base_type
+
+ # inject type declaration from annotations
+ # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
+ if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
+ arg_type = self.inject_type_from_annotations(env)
+ if arg_type is not None:
+ base_type = arg_type
+ return self.declarator.analyse(base_type, env, nonempty=nonempty)
+ else:
+ return self.name_declarator, self.type
+
+ def inject_type_from_annotations(self, env):
+ annotation = self.annotation
+ if not annotation:
+ return None
+ base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
+ if base_type is not None:
+ self.base_type = base_type
+ return arg_type
+
+ def calculate_default_value_code(self, code):
+ if self.default_value is None:
+ if self.default:
+ if self.default.is_literal:
+ # will not output any code, just assign the result_code
+ self.default.generate_evaluation_code(code)
+ return self.type.cast_code(self.default.result())
+ self.default_value = code.get_argument_default_const(self.type)
+ return self.default_value
+
+ def annotate(self, code):
+ if self.default:
+ self.default.annotate(code)
+
+ def generate_assignment_code(self, code, target=None, overloaded_assignment=False):
+ default = self.default
+ if default is None or default.is_literal:
+ return
+ if target is None:
+ target = self.calculate_default_value_code(code)
+ default.generate_evaluation_code(code)
+ default.make_owned_reference(code)
+ result = default.result() if overloaded_assignment else default.result_as(self.type)
+ code.putln("%s = %s;" % (target, result))
+ if self.type.is_pyobject:
+ code.put_giveref(default.result())
+ default.generate_post_assignment_code(code)
+ default.free_temps(code)
+
+
+class CBaseTypeNode(Node):
+ # Abstract base class for C base type nodes.
+ #
+ # Processing during analyse_declarations phase:
+ #
+ # analyse
+ # Returns the type.
+
+ def analyse_as_type(self, env):
+ return self.analyse(env)
+
+
+class CAnalysedBaseTypeNode(Node):
+ # type type
+
+ child_attrs = []
+
+ def analyse(self, env, could_be_name=False):
+ return self.type
+
+
+class CSimpleBaseTypeNode(CBaseTypeNode):
+ # name string
+ # module_path [string] Qualifying name components
+ # is_basic_c_type boolean
+ # signed boolean
+ # longness integer
+ # complex boolean
+ # is_self_arg boolean Is self argument of C method
+ # ##is_type_arg boolean Is type argument of class method
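+ #
+ # Examples: "unsigned long" (basic C type), "object", or a qualified
+ # name like "numpy.int32_t" (module_path ["numpy"], name "int32_t").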
+
+ child_attrs = []
+ arg_name = None # in case the argument name was interpreted as a type
+ module_path = []
+ is_basic_c_type = False
+ complex = False
+
+ def analyse(self, env, could_be_name=False):
+ # Return type descriptor.
+ #print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
+ type = None
+ if self.is_basic_c_type:
+ type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
+ if not type:
+ error(self.pos, "Unrecognised type modifier combination")
+ elif self.name == "object" and not self.module_path:
+ type = py_object_type
+ elif self.name is None:
+ if self.is_self_arg and env.is_c_class_scope:
+ #print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
+ type = env.parent_type
+ ## elif self.is_type_arg and env.is_c_class_scope:
+ ## type = Builtin.type_type
+ else:
+ type = py_object_type
+ else:
+ if self.module_path:
+ # Maybe it's a nested C++ class.
+ scope = env
+ for item in self.module_path:
+ entry = scope.lookup(item)
+ if entry is not None and entry.is_cpp_class:
+ scope = entry.type.scope
+ else:
+ scope = None
+ break
+
+ if scope is None:
+ # Maybe it's a cimport.
+ scope = env.find_imported_module(self.module_path, self.pos)
+ else:
+ scope = env
+
+ if scope:
+ if scope.is_c_class_scope:
+ scope = scope.global_scope()
+
+ type = scope.lookup_type(self.name)
+ if type is not None:
+ pass
+ elif could_be_name:
+ if self.is_self_arg and env.is_c_class_scope:
+ type = env.parent_type
+ ## elif self.is_type_arg and env.is_c_class_scope:
+ ## type = Builtin.type_type
+ else:
+ type = py_object_type
+ self.arg_name = EncodedString(self.name)
+ else:
+ if self.templates:
+ if self.name not in self.templates:
+ error(self.pos, "'%s' is not a type identifier" % self.name)
+ type = PyrexTypes.TemplatePlaceholderType(self.name)
+ else:
+ error(self.pos, "'%s' is not a type identifier" % self.name)
+ if type and type.is_fused and env.fused_to_specific:
+ type = type.specialize(env.fused_to_specific)
+ if self.complex:
+ if not type.is_numeric or type.is_complex:
+ error(self.pos, "can only complexify c numeric types")
+ type = PyrexTypes.CComplexType(type)
+ type.create_declaration_utility_code(env)
+ elif type is Builtin.complex_type:
+ # Special case: optimise builtin complex type into C's
+ # double complex. The parser cannot do this (as for the
+ # normal scalar types) as the user may have redeclared the
+ # 'complex' type. Testing for the exact type here works.
+ type = PyrexTypes.c_double_complex_type
+ type.create_declaration_utility_code(env)
+ self.complex = True
+ if type:
+ return type
+ else:
+ return PyrexTypes.error_type
+
+
+class MemoryViewSliceTypeNode(CBaseTypeNode):
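+ # A memoryview slice type, e.g. "double[:, ::1]" or "int[:]".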
+
+ name = 'memoryview'
+ child_attrs = ['base_type_node', 'axes']
+
+ def analyse(self, env, could_be_name=False):
+
+ base_type = self.base_type_node.analyse(env)
+ if base_type.is_error: return base_type
+
+ from . import MemoryView
+
+ try:
+ axes_specs = MemoryView.get_axes_specs(env, self.axes)
+ except CompileError as e:
+ error(e.position, e.message_only)
+ self.type = PyrexTypes.ErrorType()
+ return self.type
+
+ if not MemoryView.validate_axes(self.pos, axes_specs):
+ self.type = error_type
+ else:
+ self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
+ self.type.validate_memslice_dtype(self.pos)
+ self.use_memview_utilities(env)
+
+ return self.type
+
+ def use_memview_utilities(self, env):
+ from . import MemoryView
+ env.use_utility_code(MemoryView.view_utility_code)
+
+
+class CNestedBaseTypeNode(CBaseTypeNode):
+ # For C++ classes that live inside other C++ classes.
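+ # e.g. the "Inner" in "Outer.Inner" where Outer is a cppclass.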
+
+ # name string
+ # base_type CBaseTypeNode
+
+ child_attrs = ['base_type']
+
+ def analyse(self, env, could_be_name=None):
+ base_type = self.base_type.analyse(env)
+ if base_type is PyrexTypes.error_type:
+ return PyrexTypes.error_type
+ if not base_type.is_cpp_class:
+ error(self.pos, "'%s' is not a valid type scope" % base_type)
+ return PyrexTypes.error_type
+ type_entry = base_type.scope.lookup_here(self.name)
+ if not type_entry or not type_entry.is_type:
+ error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
+ return PyrexTypes.error_type
+ return type_entry.type
+
+
+class TemplatedTypeNode(CBaseTypeNode):
+ # After parsing:
+ # positional_args [ExprNode] List of positional arguments
+ # keyword_args DictNode Keyword arguments
+ # base_type_node CBaseTypeNode
+
+ # After analysis:
+ # type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
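+ #
+ # Depending on the base type this covers three syntaxes: C++ templates
+ # ("vector[int]"), buffer types ("ndarray[double, ndim=2]") and
+ # C arrays ("int[4]").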
+
+ child_attrs = ["base_type_node", "positional_args",
+ "keyword_args", "dtype_node"]
+
+ dtype_node = None
+
+ name = None
+
+ def analyse(self, env, could_be_name=False, base_type=None):
+ if base_type is None:
+ base_type = self.base_type_node.analyse(env)
+ if base_type.is_error: return base_type
+
+ if base_type.is_cpp_class and base_type.is_template_type():
+ # Templated class
+ if self.keyword_args and self.keyword_args.key_value_pairs:
+ error(self.pos, "c++ templates cannot take keyword arguments")
+ self.type = PyrexTypes.error_type
+ else:
+ template_types = []
+ for template_node in self.positional_args:
+ type = template_node.analyse_as_type(env)
+ if type is None:
+ error(template_node.pos, "unknown type in template argument")
+ type = error_type
+ template_types.append(type)
+ self.type = base_type.specialize_here(self.pos, template_types)
+
+ elif base_type.is_pyobject:
+ # Buffer
+ from . import Buffer
+
+ options = Buffer.analyse_buffer_options(
+ self.pos,
+ env,
+ self.positional_args,
+ self.keyword_args,
+ base_type.buffer_defaults)
+
+ if sys.version_info[0] < 3:
+ # Py 2.x enforces byte strings as keyword arguments ...
+ options = dict([(name.encode('ASCII'), value)
+ for name, value in options.items()])
+
+ self.type = PyrexTypes.BufferType(base_type, **options)
+ if has_np_pythran(env) and is_pythran_buffer(self.type):
+ self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
+
+ else:
+ # Array
+ empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
+ if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
+ error(self.pos, "invalid array declaration")
+ self.type = PyrexTypes.error_type
+ else:
+ # It would be nice to merge this class with CArrayDeclaratorNode,
+ # but arrays are part of the declaration, not the type...
+ if not self.positional_args:
+ dimension = None
+ else:
+ dimension = self.positional_args[0]
+ self.array_declarator = CArrayDeclaratorNode(
+ self.pos,
+ base=empty_declarator,
+ dimension=dimension)
+ self.type = self.array_declarator.analyse(base_type, env)[1]
+
+ if self.type.is_fused and env.fused_to_specific:
+ self.type = self.type.specialize(env.fused_to_specific)
+
+ return self.type
+
+
+class CComplexBaseTypeNode(CBaseTypeNode):
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+
+ child_attrs = ["base_type", "declarator"]
+
+ def analyse(self, env, could_be_name=False):
+ base = self.base_type.analyse(env, could_be_name)
+ _, type = self.declarator.analyse(base, env)
+ return type
+
+
+class CTupleBaseTypeNode(CBaseTypeNode):
+ # components [CBaseTypeNode]
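+ # e.g. the ctuple type "(int, double)".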
+
+ child_attrs = ["components"]
+
+ def analyse(self, env, could_be_name=False):
+ component_types = []
+ for c in self.components:
+ type = c.analyse(env)
+ if type.is_pyobject:
+ error(c.pos, "Tuple types can't (yet) contain Python objects.")
+ return error_type
+ component_types.append(type)
+ entry = env.declare_tuple_type(self.pos, component_types)
+ entry.used = True
+ return entry.type
+
+
+class FusedTypeNode(CBaseTypeNode):
+ """
+ Represents a fused type in a ctypedef statement:
+
+ ctypedef cython.fused_type(int, long, long long) integral
+
+ name str name of this fused type
+ types [CSimpleBaseTypeNode] is the list of types to be fused
+ """
+
+ child_attrs = []
+
+ def analyse_declarations(self, env):
+ type = self.analyse(env)
+ entry = env.declare_typedef(self.name, type, self.pos)
+
+ # Omit the typedef declaration that self.declarator would produce
+ entry.in_cinclude = True
+
+ def analyse(self, env, could_be_name=False):
+ types = []
+ for type_node in self.types:
+ type = type_node.analyse_as_type(env)
+
+ if not type:
+ error(type_node.pos, "Not a type")
+ continue
+
+ if type in types:
+ error(type_node.pos, "Type specified multiple times")
+ else:
+ types.append(type)
+
+ # if len(self.types) == 1:
+ # return types[0]
+
+ return PyrexTypes.FusedType(types, name=self.name)
+
+
+class CConstTypeNode(CBaseTypeNode):
+ # base_type CBaseTypeNode
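+ # e.g. the "const int" in "cdef const int x".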
+
+ child_attrs = ["base_type"]
+
+ def analyse(self, env, could_be_name=False):
+ base = self.base_type.analyse(env, could_be_name)
+ if base.is_pyobject:
+ error(self.pos,
+ "Const base type cannot be a Python object")
+ return PyrexTypes.c_const_type(base)
+
+
+class CVarDefNode(StatNode):
+ # C variable definition or forward/extern function declaration.
+ #
+ # visibility 'private' or 'public' or 'extern'
+ # base_type CBaseTypeNode
+ # declarators [CDeclaratorNode]
+ # in_pxd boolean
+ # api boolean
+ # overridable boolean whether it is a cpdef
+ # modifiers ['inline']
+
+ # decorators [cython.locals(...)] or None
+ # directive_locals { string : NameNode } locals defined by cython.locals(...)
+
+ child_attrs = ["base_type", "declarators"]
+
+ decorators = None
+ directive_locals = None
+
+ def analyse_declarations(self, env, dest_scope=None):
+ if self.directive_locals is None:
+ self.directive_locals = {}
+ if not dest_scope:
+ dest_scope = env
+ self.dest_scope = dest_scope
+
+ if self.declarators:
+ templates = self.declarators[0].analyse_templates()
+ else:
+ templates = None
+ if templates is not None:
+ if self.visibility != 'extern':
+ error(self.pos, "Only extern functions allowed")
+ if len(self.declarators) > 1:
+ error(self.declarators[1].pos, "Can't multiply declare template types")
+ env = TemplateScope('func_template', env)
+ env.directives = env.outer_scope.directives
+ for template_param in templates:
+ env.declare_type(template_param.name, template_param, self.pos)
+
+ base_type = self.base_type.analyse(env)
+
+ if base_type.is_fused and not self.in_pxd and (env.is_c_class_scope or
+ env.is_module_scope):
+ error(self.pos, "Fused types not allowed here")
+ return error_type
+
+ self.entry = None
+ visibility = self.visibility
+
+ for declarator in self.declarators:
+
+ if (len(self.declarators) > 1
+ and not isinstance(declarator, CNameDeclaratorNode)
+ and env.directives['warn.multiple_declarators']):
+ warning(
+ declarator.pos,
+ "Non-trivial type declarators in shared declaration (e.g. mix of pointers and values). "
+ "Each pointer declaration should be on its own line.", 1)
+
+ create_extern_wrapper = (self.overridable
+ and self.visibility == 'extern'
+ and env.is_module_scope)
+ if create_extern_wrapper:
+ declarator.overridable = False
+ if isinstance(declarator, CFuncDeclaratorNode):
+ name_declarator, type = declarator.analyse(
+ base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
+ else:
+ name_declarator, type = declarator.analyse(
+ base_type, env, visibility=visibility, in_pxd=self.in_pxd)
+ if not type.is_complete():
+ if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
+ error(declarator.pos, "Variable type '%s' is incomplete" % type)
+ if self.visibility == 'extern' and type.is_pyobject:
+ error(declarator.pos, "Python object cannot be declared extern")
+ name = name_declarator.name
+ cname = name_declarator.cname
+ if name == '':
+ error(declarator.pos, "Missing name in declaration.")
+ return
+ if type.is_reference and self.visibility != 'extern':
+ error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
+ if type.is_cfunction:
+ if 'staticmethod' in env.directives:
+ type.is_static_method = True
+ self.entry = dest_scope.declare_cfunction(
+ name, type, declarator.pos,
+ cname=cname, visibility=self.visibility, in_pxd=self.in_pxd,
+ api=self.api, modifiers=self.modifiers, overridable=self.overridable)
+ if self.entry is not None:
+ self.entry.directive_locals = copy.copy(self.directive_locals)
+ if create_extern_wrapper:
+ self.entry.type.create_to_py_utility_code(env)
+ self.entry.create_wrapper = True
+ else:
+ if self.overridable:
+ warning(self.pos, "cpdef variables will not be supported in Cython 3; "
+ "currently they are no different from cdef variables", 2)
+ if self.directive_locals:
+ error(self.pos, "Decorators can only be followed by functions")
+ self.entry = dest_scope.declare_var(
+ name, type, declarator.pos,
+ cname=cname, visibility=visibility, in_pxd=self.in_pxd,
+ api=self.api, is_cdef=1)
+ if Options.docstrings:
+ self.entry.doc = embed_position(self.pos, self.doc)
+
+
+class CStructOrUnionDefNode(StatNode):
+ # name string
+ # cname string or None
+ # kind "struct" or "union"
+ # typedef_flag boolean
+ # visibility "public" or "private"
+ # api boolean
+ # in_pxd boolean
+ # attributes [CVarDefNode] or None
+ # entry Entry
+ # packed boolean
+
+ child_attrs = ["attributes"]
+
+ def declare(self, env, scope=None):
+ self.entry = env.declare_struct_or_union(
+ self.name, self.kind, scope, self.typedef_flag, self.pos,
+ self.cname, visibility=self.visibility, api=self.api,
+ packed=self.packed)
+
+ def analyse_declarations(self, env):
+ scope = None
+ if self.attributes is not None:
+ scope = StructOrUnionScope(self.name)
+ self.declare(env, scope)
+ if self.attributes is not None:
+ if self.in_pxd and not env.in_cinclude:
+ self.entry.defined_in_pxd = 1
+ for attr in self.attributes:
+ attr.analyse_declarations(env, scope)
+ if self.visibility != 'extern':
+ for attr in scope.var_entries:
+ type = attr.type
+ while type.is_array:
+ type = type.base_type
+ if type == self.entry.type:
+ error(attr.pos, "Struct cannot contain itself as a member.")
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class CppClassNode(CStructOrUnionDefNode, BlockNode):
+
+ # name string
+ # cname string or None
+ # visibility "extern"
+ # in_pxd boolean
+ # attributes [CVarDefNode] or None
+ # entry Entry
+ # base_classes [CBaseTypeNode]
+ # templates [(string, bool)] or None
+ # decorators [DecoratorNode] or None
+
+ decorators = None
+
+ def declare(self, env):
+ if self.templates is None:
+ template_types = None
+ else:
+ template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
+ for template_name, required in self.templates]
+ num_optional_templates = sum(not required for _, required in self.templates)
+ if num_optional_templates and not all(required for _, required in self.templates[:-num_optional_templates]):
+ error(self.pos, "Required template parameters must precede optional template parameters.")
+ self.entry = env.declare_cpp_class(
+ self.name, None, self.pos, self.cname,
+ base_classes=[], visibility=self.visibility, templates=template_types)
+
+ def analyse_declarations(self, env):
+ if self.templates is None:
+ template_types = template_names = None
+ else:
+ template_names = [template_name for template_name, _ in self.templates]
+ template_types = [PyrexTypes.TemplatePlaceholderType(template_name, not required)
+ for template_name, required in self.templates]
+ scope = None
+ if self.attributes is not None:
+ scope = CppClassScope(self.name, env, templates=template_names)
+ def base_ok(base_class):
+ if base_class.is_cpp_class or base_class.is_struct:
+ return True
+ else:
+ error(self.pos, "Base class '%s' not a struct or class." % base_class)
+ base_class_types = filter(base_ok, [b.analyse(scope or env) for b in self.base_classes])
+ self.entry = env.declare_cpp_class(
+ self.name, scope, self.pos,
+ self.cname, base_class_types, visibility=self.visibility, templates=template_types)
+ if self.entry is None:
+ return
+ self.entry.is_cpp_class = 1
+ if scope is not None:
+ scope.type = self.entry.type
+ defined_funcs = []
+ def func_attributes(attributes):
+ for attr in attributes:
+ if isinstance(attr, CFuncDefNode):
+ yield attr
+ elif isinstance(attr, CompilerDirectivesNode):
+ for sub_attr in func_attributes(attr.body.stats):
+ yield sub_attr
+ if self.attributes is not None:
+ if self.in_pxd and not env.in_cinclude:
+ self.entry.defined_in_pxd = 1
+ for attr in self.attributes:
+ declare = getattr(attr, 'declare', None)
+ if declare:
+ attr.declare(scope)
+ attr.analyse_declarations(scope)
+ for func in func_attributes(self.attributes):
+ defined_funcs.append(func)
+ if self.templates is not None:
+ func.template_declaration = "template <typename %s>" % ", typename ".join(template_names)
+ self.body = StatListNode(self.pos, stats=defined_funcs)
+ self.scope = scope
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(self.entry.type.scope)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(self.entry.type.scope, code)
+
+ def generate_execution_code(self, code):
+ self.body.generate_execution_code(code)
+
+ def annotate(self, code):
+ self.body.annotate(code)
+
+
+class CEnumDefNode(StatNode):
+ # name string or None
+ # cname string or None
+ # items [CEnumDefItemNode]
+ # typedef_flag boolean
+ # visibility "public" or "private" or "extern"
+ # api boolean
+ # in_pxd boolean
+ # create_wrapper boolean
+ # entry Entry
+
+ child_attrs = ["items"]
+
+ def declare(self, env):
+ self.entry = env.declare_enum(
+ self.name, self.pos,
+ cname=self.cname, typedef_flag=self.typedef_flag,
+ visibility=self.visibility, api=self.api,
+ create_wrapper=self.create_wrapper)
+
+ def analyse_declarations(self, env):
+ if self.items is not None:
+ if self.in_pxd and not env.in_cinclude:
+ self.entry.defined_in_pxd = 1
+ for item in self.items:
+ item.analyse_declarations(env, self.entry)
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
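+ # For public/api enums, expose each value in the module dict,
+ # roughly: t = PyInt_FromLong(VALUE);
+ # PyDict_SetItemString(moddict, "VALUE", t); Py_DECREF(t);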
+ if self.visibility == 'public' or self.api:
+ code.mark_pos(self.pos)
+ temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
+ for item in self.entry.enum_values:
+ code.putln("%s = PyInt_FromLong(%s); %s" % (
+ temp,
+ item.cname,
+ code.error_goto_if_null(temp, item.pos)))
+ code.put_gotref(temp)
+ code.putln('if (PyDict_SetItemString(%s, "%s", %s) < 0) %s' % (
+ Naming.moddict_cname,
+ item.name,
+ temp,
+ code.error_goto(item.pos)))
+ code.put_decref_clear(temp, PyrexTypes.py_object_type)
+ code.funcstate.release_temp(temp)
+
+
+class CEnumDefItemNode(StatNode):
+ # name string
+ # cname string or None
+ # value ExprNode or None
+
+ child_attrs = ["value"]
+
+ def analyse_declarations(self, env, enum_entry):
+ if self.value:
+ self.value = self.value.analyse_const_expression(env)
+ if not self.value.type.is_int:
+ self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
+ self.value = self.value.analyse_const_expression(env)
+ entry = env.declare_const(
+ self.name, enum_entry.type,
+ self.value, self.pos, cname=self.cname,
+ visibility=enum_entry.visibility, api=enum_entry.api,
+ create_wrapper=enum_entry.create_wrapper and enum_entry.name is None)
+ enum_entry.enum_values.append(entry)
+ if enum_entry.name:
+ enum_entry.type.values.append(entry.name)
+
+
+class CTypeDefNode(StatNode):
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+ # visibility "public" or "private"
+ # api boolean
+ # in_pxd boolean
+
+ child_attrs = ["base_type", "declarator"]
+
+ def analyse_declarations(self, env):
+ base = self.base_type.analyse(env)
+ name_declarator, type = self.declarator.analyse(
+ base, env, visibility=self.visibility, in_pxd=self.in_pxd)
+ name = name_declarator.name
+ cname = name_declarator.cname
+
+ entry = env.declare_typedef(
+ name, type, self.pos,
+ cname=cname, visibility=self.visibility, api=self.api)
+
+ if type.is_fused:
+ entry.in_cinclude = True
+
+ if self.in_pxd and not env.in_cinclude:
+ entry.defined_in_pxd = 1
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class FuncDefNode(StatNode, BlockNode):
+ # Base class for function definition nodes.
+ #
+ # return_type PyrexType
+ # #filename string C name of filename string const
+ # entry Symtab.Entry
+ # needs_closure boolean Whether or not this function has inner functions/classes/yield
+ # needs_outer_scope boolean Whether or not this function requires outer scope
+ # pymethdef_required boolean Force Python method struct generation
+ # directive_locals { string : ExprNode } locals defined by cython.locals(...)
+ # directive_returns [ExprNode] type defined by cython.returns(...)
+ # star_arg PyArgDeclNode or None * argument
+ # starstar_arg PyArgDeclNode or None ** argument
+ #
+ # is_async_def boolean is a Coroutine function
+ #
+ # has_fused_arguments boolean
+ # Whether this cdef function has fused parameters. This is needed
+ # by AnalyseDeclarationsTransform, so it can replace CFuncDefNodes
+ # with fused argument types with a FusedCFuncDefNode
+
+ py_func = None
+ needs_closure = False
+ needs_outer_scope = False
+ pymethdef_required = False
+ is_generator = False
+ is_generator_body = False
+ is_async_def = False
+ modifiers = []
+ has_fused_arguments = False
+ star_arg = None
+ starstar_arg = None
+ is_cyfunction = False
+ code_object = None
+
+ def analyse_default_values(self, env):
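+ # Enforce Python's argument ordering, e.g. "def f(a=1, b)" is an
+ # error, while keyword-only arguments after "*" may omit defaults.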
+ default_seen = 0
+ for arg in self.args:
+ if arg.default:
+ default_seen = 1
+ if arg.is_generic:
+ arg.default = arg.default.analyse_types(env)
+ arg.default = arg.default.coerce_to(arg.type, env)
+ else:
+ error(arg.pos, "This argument cannot have a default value")
+ arg.default = None
+ elif arg.kw_only:
+ default_seen = 1
+ elif default_seen:
+ error(arg.pos, "Non-default argument following default argument")
+
+ def analyse_annotation(self, env, annotation):
+ # Annotations can contain not only valid Python expressions but also arbitrary type references.
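+ # e.g. "x: cython.int" is kept as a type reference, while a non-type
+ # annotation like "x: 'doc'" is analysed as a normal expression below.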
+ if annotation is None:
+ return None
+ if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
+ annotation = annotation.analyse_types(env)
+ return annotation
+
+ def analyse_annotations(self, env):
+ for arg in self.args:
+ if arg.annotation:
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
+
+ def align_argument_type(self, env, arg):
+ # @cython.locals()
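+ # e.g. "@cython.locals(n=cython.int)" re-types argument "n" to int;
+ # a conflicting explicit C type is reported as a signature mismatch.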
+ directive_locals = self.directive_locals
+ orig_type = arg.type
+ if arg.name in directive_locals:
+ type_node = directive_locals[arg.name]
+ other_type = type_node.analyse_as_type(env)
+ elif isinstance(arg, CArgDeclNode) and arg.annotation and env.directives['annotation_typing']:
+ type_node = arg.annotation
+ other_type = arg.inject_type_from_annotations(env)
+ if other_type is None:
+ return arg
+ else:
+ return arg
+ if other_type is None:
+ error(type_node.pos, "Not a type")
+ elif orig_type is not py_object_type and not orig_type.same_as(other_type):
+ error(arg.base_type.pos, "Signature does not agree with previous declaration")
+ error(type_node.pos, "Previous declaration here")
+ else:
+ arg.type = other_type
+ return arg
+
+ def need_gil_acquisition(self, lenv):
+ return 0
+
+ def create_local_scope(self, env):
+ genv = env
+ while genv.is_py_class_scope or genv.is_c_class_scope:
+ genv = genv.outer_scope
+ if self.needs_closure:
+ lenv = ClosureScope(name=self.entry.name,
+ outer_scope=genv,
+ parent_scope=env,
+ scope_name=self.entry.cname)
+ else:
+ lenv = LocalScope(name=self.entry.name,
+ outer_scope=genv,
+ parent_scope=env)
+ lenv.return_type = self.return_type
+ type = self.entry.type
+ if type.is_cfunction:
+ lenv.nogil = type.nogil and not type.with_gil
+ self.local_scope = lenv
+ lenv.directives = env.directives
+ return lenv
+
+ def generate_function_body(self, env, code):
+ self.body.generate_execution_code(code)
+
+ def generate_function_definitions(self, env, code):
+ from . import Buffer
+ if self.return_type.is_memoryviewslice:
+ from . import MemoryView
+
+ lenv = self.local_scope
+ if lenv.is_closure_scope and not lenv.is_passthrough:
+ outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
+ Naming.outer_scope_cname)
+ else:
+ outer_scope_cname = Naming.outer_scope_cname
+ lenv.mangle_closure_cnames(outer_scope_cname)
+ # Generate closure function definitions
+ self.body.generate_function_definitions(lenv, code)
+ # generate lambda function definitions
+ self.generate_lambda_definitions(lenv, code)
+
+ is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
+ self.entry.scope.is_c_class_scope)
+ is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
+ self.entry.scope.is_c_class_scope)
+ is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
+ if is_buffer_slot:
+ if 'cython_unused' not in self.modifiers:
+ self.modifiers = self.modifiers + ['cython_unused']
+
+ preprocessor_guard = self.get_preprocessor_guard()
+
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("Profile", "Profile.c"))
+
+ # Generate C code for header and body of function
+ code.enter_cfunc_scope(lenv)
+ code.return_from_error_cleanup_label = code.new_label()
+ code.funcstate.gil_owned = not lenv.nogil
+
+ # ----- Top-level constants used by this function
+ code.mark_pos(self.pos)
+ self.generate_cached_builtins_decls(lenv, code)
+ # ----- Function header
+ code.putln("")
+
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+
+ with_pymethdef = (self.needs_assignment_synthesis(env, code) or
+ self.pymethdef_required)
+ if self.py_func:
+ self.py_func.generate_function_header(
+ code, with_pymethdef=with_pymethdef, proto_only=True)
+ self.generate_function_header(code, with_pymethdef=with_pymethdef)
+ # ----- Local variable declarations
+ # Find function scope
+ cenv = env
+ while cenv.is_py_class_scope or cenv.is_c_class_scope:
+ cenv = cenv.outer_scope
+ if self.needs_closure:
+ code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
+ code.putln(";")
+ elif self.needs_outer_scope:
+ if lenv.is_passthrough:
+ code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
+ code.putln(";")
+ code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
+ code.putln(";")
+ self.generate_argument_declarations(lenv, code)
+
+ for entry in lenv.var_entries:
+ if not (entry.in_closure or entry.is_arg):
+ code.put_var_declaration(entry)
+
+ # Initialize the return variable __pyx_r
+ init = ""
+ if not self.return_type.is_void:
+ if self.return_type.is_pyobject:
+ init = " = NULL"
+ elif self.return_type.is_memoryviewslice:
+ init = ' = ' + MemoryView.memslice_entry_init
+
+ code.putln("%s%s;" % (
+ self.return_type.declaration_code(Naming.retval_cname),
+ init))
+
+ tempvardecl_code = code.insertion_point()
+ self.generate_keyword_list(code)
+
+ # ----- GIL acquisition
+ acquire_gil = self.acquire_gil
+
+ # See if we need to acquire the GIL for variable declarations, or for
+ # refnanny only
+
+ # Closures are not currently possible for cdef nogil functions,
+ # but check them anyway
+ have_object_args = self.needs_closure or self.needs_outer_scope
+ for arg in lenv.arg_entries:
+ if arg.type.is_pyobject:
+ have_object_args = True
+ break
+
+ used_buffer_entries = [entry for entry in lenv.buffer_entries if entry.used]
+
+ acquire_gil_for_var_decls_only = (
+ lenv.nogil and lenv.has_with_gil_block and
+ (have_object_args or used_buffer_entries))
+
+ acquire_gil_for_refnanny_only = (
+ lenv.nogil and lenv.has_with_gil_block and not
+ acquire_gil_for_var_decls_only)
+
+ use_refnanny = not lenv.nogil or lenv.has_with_gil_block
+
+ if acquire_gil or acquire_gil_for_var_decls_only:
+ code.put_ensure_gil()
+ code.funcstate.gil_owned = True
+ elif lenv.nogil and lenv.has_with_gil_block:
+ code.declare_gilstate()
+
+ if profile or linetrace:
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ tempvardecl_code.put_trace_declarations()
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
+
+ # ----- Special check for getbuffer
+ if is_getbuffer_slot:
+ self.getbuffer_check(code)
+
+ # ----- set up refnanny
+ if use_refnanny:
+ tempvardecl_code.put_declare_refcount_context()
+ code.put_setup_refcount_context(
+ self.entry.name, acquire_gil=acquire_gil_for_refnanny_only)
+
+ # ----- Automatic lead-ins for certain special functions
+ if is_getbuffer_slot:
+ self.getbuffer_init(code)
+ # ----- Create closure scope object
+ if self.needs_closure:
+ tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
+ slot_func_cname = TypeSlots.get_slot_function(lenv.scope_class.type.scope, tp_slot)
+ if not slot_func_cname:
+ slot_func_cname = '%s->tp_new' % lenv.scope_class.type.typeptr_cname
+ code.putln("%s = (%s)%s(%s, %s, NULL);" % (
+ Naming.cur_scope_cname,
+ lenv.scope_class.type.empty_declaration_code(),
+ slot_func_cname,
+ lenv.scope_class.type.typeptr_cname,
+ Naming.empty_tuple))
+ code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
+ # Scope unconditionally DECREFed on return.
+ code.putln("%s = %s;" % (
+ Naming.cur_scope_cname,
+ lenv.scope_class.type.cast_code("Py_None")))
+ code.put_incref("Py_None", py_object_type)
+ code.putln(code.error_goto(self.pos))
+ code.putln("} else {")
+ code.put_gotref(Naming.cur_scope_cname)
+ code.putln("}")
+ # Note that it is unsafe to decref the scope at this point.
+ if self.needs_outer_scope:
+ if self.is_cyfunction:
+ code.putln("%s = (%s) __Pyx_CyFunction_GetClosure(%s);" % (
+ outer_scope_cname,
+ cenv.scope_class.type.empty_declaration_code(),
+ Naming.self_cname))
+ else:
+ code.putln("%s = (%s) %s;" % (
+ outer_scope_cname,
+ cenv.scope_class.type.empty_declaration_code(),
+ Naming.self_cname))
+ if lenv.is_passthrough:
+ code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname))
+ elif self.needs_closure:
+ # inner closures own a reference to their outer parent
+ code.put_incref(outer_scope_cname, cenv.scope_class.type)
+ code.put_giveref(outer_scope_cname)
+ # ----- Trace function call
+ if profile or linetrace:
+ # this looks a bit late, but if we don't get here due to a
+ # fatal error beforehand, it's not really worth tracing
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.is_wrapper:
+ trace_name = self.entry.name + " (wrapper)"
+ else:
+ trace_name = self.entry.name
+ code.put_trace_call(
+ trace_name, self.pos, nogil=not code.funcstate.gil_owned)
+ code.funcstate.can_trace = True
+ # ----- Fetch arguments
+ self.generate_argument_parsing_code(env, code)
+ # If an argument is assigned to in the body, we must
+ # incref it to properly keep track of refcounts.
+ is_cdef = isinstance(self, CFuncDefNode)
+ for entry in lenv.arg_entries:
+ if entry.type.is_pyobject:
+ if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
+ code.put_var_incref(entry)
+
+ # Note: defaults are always incref-ed. For def functions,
+ # we acquire arguments from object conversion, so we have
+ # new references. If we are a cdef function, we need to
+ # incref our arguments.
+ elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
+ code.put_incref_memoryviewslice(entry.cname, have_gil=code.funcstate.gil_owned)
+ for entry in lenv.var_entries:
+ if entry.is_arg and len(entry.cf_assignments) > 1 and not entry.in_closure:
+ if entry.xdecref_cleanup:
+ code.put_var_xincref(entry)
+ else:
+ code.put_var_incref(entry)
+
+ # ----- Initialise local buffer auxiliary variables
+ for entry in lenv.var_entries + lenv.arg_entries:
+ if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
+ Buffer.put_init_vars(entry, code)
+
+ # ----- Check and convert arguments
+ self.generate_argument_type_tests(code)
+ # ----- Acquire buffer arguments
+ for entry in lenv.arg_entries:
+ if entry.type.is_buffer:
+ Buffer.put_acquire_arg_buffer(entry, code, self.pos)
+
+ if acquire_gil_for_var_decls_only:
+ code.put_release_ensured_gil()
+ code.funcstate.gil_owned = False
+
+ # -------------------------
+ # ----- Function body -----
+ # -------------------------
+ self.generate_function_body(env, code)
+
+ code.mark_pos(self.pos, trace=False)
+ code.putln("")
+ code.putln("/* function exit code */")
+
+ # ----- Default return value
+ if not self.body.is_terminator:
+ if self.return_type.is_pyobject:
+ #if self.return_type.is_extension_type:
+ # lhs = "(PyObject *)%s" % Naming.retval_cname
+ #else:
+ lhs = Naming.retval_cname
+ code.put_init_to_py_none(lhs, self.return_type)
+ else:
+ val = self.return_type.default_value
+ if val:
+ code.putln("%s = %s;" % (Naming.retval_cname, val))
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+ # ----- Error cleanup
+ if code.error_label in code.labels_used:
+ if not self.body.is_terminator:
+ code.put_goto(code.return_label)
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type, have_gil=not lenv.nogil)
+
+ # Clean up buffers -- this calls a Python function
+ # so need to save and restore error state
+ buffers_present = len(used_buffer_entries) > 0
+ #memslice_entries = [e for e in lenv.entries.values() if e.type.is_memoryviewslice]
+ if buffers_present:
+ code.globalstate.use_utility_code(restore_exception_utility_code)
+ code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
+ code.putln("__Pyx_PyThreadState_declare")
+ code.putln("__Pyx_PyThreadState_assign")
+ code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
+ for entry in used_buffer_entries:
+ Buffer.put_release_buffer_code(code, entry)
+ #code.putln("%s = 0;" % entry.cname)
+ code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
+
+ if self.return_type.is_memoryviewslice:
+ MemoryView.put_init_entry(Naming.retval_cname, code)
+ err_val = Naming.retval_cname
+ else:
+ err_val = self.error_value()
+
+ exc_check = self.caller_will_check_exceptions()
+ if err_val is not None or exc_check:
+ # TODO: Fix exception tracing (though currently unused by cProfile).
+ # code.globalstate.use_utility_code(get_exception_tuple_utility_code)
+ # code.put_trace_exception()
+
+ if lenv.nogil and not lenv.has_with_gil_block:
+ code.putln("{")
+ code.put_ensure_gil()
+
+ code.put_add_traceback(self.entry.qualified_name)
+
+ if lenv.nogil and not lenv.has_with_gil_block:
+ code.put_release_ensured_gil()
+ code.putln("}")
+ else:
+ warning(self.entry.pos,
+ "Unraisable exception in function '%s'." %
+ self.entry.qualified_name, 0)
+ code.put_unraisable(self.entry.qualified_name, lenv.nogil)
+ default_retval = self.return_type.default_value
+ if err_val is None and default_retval:
+ err_val = default_retval
+ if err_val is not None:
+ if err_val != Naming.retval_cname:
+ code.putln("%s = %s;" % (Naming.retval_cname, err_val))
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+
+ if is_getbuffer_slot:
+ self.getbuffer_error_cleanup(code)
+
+ # If we are using the non-error cleanup section we should
+ # jump past it if we have an error. The if-test below determines
+ # whether this section is used.
+ if buffers_present or is_getbuffer_slot or self.return_type.is_memoryviewslice:
+ code.put_goto(code.return_from_error_cleanup_label)
+
+ # ----- Non-error return cleanup
+ code.put_label(code.return_label)
+ for entry in used_buffer_entries:
+ Buffer.put_release_buffer_code(code, entry)
+ if is_getbuffer_slot:
+ self.getbuffer_normal_cleanup(code)
+
+ if self.return_type.is_memoryviewslice:
+ # See if our return value is uninitialized on non-error return
+ # from . import MemoryView
+ # MemoryView.err_if_nogil_initialized_check(self.pos, env)
+ cond = code.unlikely(self.return_type.error_condition(Naming.retval_cname))
+ code.putln(
+ 'if (%s) {' % cond)
+ if env.nogil:
+ code.put_ensure_gil()
+ code.putln(
+ 'PyErr_SetString(PyExc_TypeError, "Memoryview return value is not initialized");')
+ if env.nogil:
+ code.put_release_ensured_gil()
+ code.putln(
+ '}')
+
+ # ----- Return cleanup for both error and no-error return
+ code.put_label(code.return_from_error_cleanup_label)
+
+ for entry in lenv.var_entries:
+ if not entry.used or entry.in_closure:
+ continue
+
+ if entry.type.is_memoryviewslice:
+ code.put_xdecref_memoryviewslice(entry.cname, have_gil=not lenv.nogil)
+ elif entry.type.is_pyobject:
+ if not entry.is_arg or len(entry.cf_assignments) > 1:
+ if entry.xdecref_cleanup:
+ code.put_var_xdecref(entry)
+ else:
+ code.put_var_decref(entry)
+
+ # Decref any increfed args
+ for entry in lenv.arg_entries:
+ if entry.type.is_pyobject:
+ if (acquire_gil or len(entry.cf_assignments) > 1) and not entry.in_closure:
+ code.put_var_decref(entry)
+ elif (entry.type.is_memoryviewslice and
+ (not is_cdef or len(entry.cf_assignments) > 1)):
+ # decref slices of def functions and acquired slices from cdef
+ # functions, but not borrowed slices from cdef functions.
+ code.put_xdecref_memoryviewslice(entry.cname,
+ have_gil=not lenv.nogil)
+ if self.needs_closure:
+ code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
+
+ # ----- Return
+ # This code is duplicated in ModuleNode.generate_module_init_func
+ if not lenv.nogil:
+ default_retval = self.return_type.default_value
+ err_val = self.error_value()
+ if err_val is None and default_retval:
+ err_val = default_retval # FIXME: why is err_val not used?
+ if self.return_type.is_pyobject:
+ code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
+
+ if self.entry.is_special and self.entry.name == "__hash__":
+ # Returning -1 for __hash__ is supposed to signal an error;
+ # we do the same as Python instances and coerce -1 into -2.
+ code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
+ Naming.retval_cname, Naming.retval_cname))
+
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.return_type.is_pyobject:
+ code.put_trace_return(
+ Naming.retval_cname, nogil=not code.funcstate.gil_owned)
+ else:
+ code.put_trace_return(
+ "Py_None", nogil=not code.funcstate.gil_owned)
+
+ if not lenv.nogil:
+ # GIL holding function
+ code.put_finish_refcount_context()
+
+ if acquire_gil or (lenv.nogil and lenv.has_with_gil_block):
+ # release the GIL (note that with-gil blocks acquire it on exit in their EnsureGILNode)
+ code.put_release_ensured_gil()
+ code.funcstate.gil_owned = False
+
+ if not self.return_type.is_void:
+ code.putln("return %s;" % Naming.retval_cname)
+
+ code.putln("}")
+
+ if preprocessor_guard:
+ code.putln("#endif /*!(%s)*/" % preprocessor_guard)
+
+ # ----- Go back and insert temp variable declarations
+ tempvardecl_code.put_temp_declarations(code.funcstate)
+
+ # ----- Python version
+ code.exit_cfunc_scope()
+ if self.py_func:
+ self.py_func.generate_function_definitions(env, code)
+ self.generate_wrapper_functions(code)
+
+ def declare_argument(self, env, arg):
+ if arg.type.is_void:
+ error(arg.pos, "Invalid use of 'void'")
+ elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
+ error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
+ entry = env.declare_arg(arg.name, arg.type, arg.pos)
+ if arg.annotation:
+ entry.annotation = arg.annotation
+ return entry
+
+ def generate_arg_type_test(self, arg, code):
+ # Generate type test for one argument.
+ if arg.type.typeobj_is_available():
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ArgTypeTest", "FunctionArguments.c"))
+ typeptr_cname = arg.type.typeptr_cname
+ arg_code = "((PyObject *)%s)" % arg.entry.cname
+ code.putln(
+ 'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
+ arg_code,
+ typeptr_cname,
+ arg.accept_none,
+ arg.name,
+ arg.type.is_builtin_type and arg.type.require_exact,
+ code.error_goto(arg.pos)))
+ else:
+ error(arg.pos, "Cannot test type of extern C class without type object name specification")
+
+ def generate_arg_none_check(self, arg, code):
+ # Generate None check for one argument.
+ if arg.type.is_memoryviewslice:
+ cname = "%s.memview" % arg.entry.cname
+ else:
+ cname = arg.entry.cname
+
+ code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % cname)
+ code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%%.%ds' must not be None", "%s"); %s''' % (
+ max(200, len(arg.name)), arg.name,
+ code.error_goto(arg.pos)))
+ code.putln('}')
+
+ def generate_wrapper_functions(self, code):
+ pass
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ # Evaluate and store argument default values
+ for arg in self.args:
+ if not arg.is_dynamic:
+ arg.generate_assignment_code(code)
+
+ #
+ # Special code for the __getbuffer__ function
+ #
+ def _get_py_buffer_info(self):
+ py_buffer = self.local_scope.arg_entries[1]
+ try:
+ # Check builtin definition of struct Py_buffer
+ obj_type = py_buffer.type.base_type.scope.entries['obj'].type
+ except (AttributeError, KeyError):
+ # User code redeclared struct Py_buffer
+ obj_type = None
+ return py_buffer, obj_type
+
+ # Old Python 3 used to support write-locks on buffer-like objects by
+ # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
+ # feature is obsolete, it was almost never used (only one instance in
+ # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
+ # (see bpo-14203). We add an extra check here to prevent legacy code
+ # from trying to use the feature and prevent segmentation faults.
+ def getbuffer_check(self, code):
+ py_buffer, _ = self._get_py_buffer_info()
+ view = py_buffer.cname
+ code.putln("if (%s == NULL) {" % view)
+ code.putln("PyErr_SetString(PyExc_BufferError, "
+ "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
+ code.putln("return -1;")
+ code.putln("}")
+
+ def getbuffer_init(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.put_init_to_py_none("%s->obj" % view, obj_type)
+ code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ else:
+ code.putln("%s->obj = NULL;" % view)
+
+ def getbuffer_error_cleanup(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj != NULL) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+ else:
+ code.putln("Py_CLEAR(%s->obj);" % view)
+
+ def getbuffer_normal_cleanup(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj == Py_None) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+
+ def get_preprocessor_guard(self):
+ if not self.entry.is_special:
+ return None
+ name = self.entry.name
+ slot = TypeSlots.method_name_to_slot.get(name)
+ if not slot:
+ return None
+ if name == '__long__' and not self.entry.scope.lookup_here('__int__'):
+ return None
+ if name in ("__getbuffer__", "__releasebuffer__") and self.entry.scope.is_c_class_scope:
+ return None
+ return slot.preprocessor_guard_code()
+
+
+class CFuncDefNode(FuncDefNode):
+ # C function definition.
+ #
+ # modifiers ['inline']
+ # visibility 'private' or 'public' or 'extern'
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+ # cfunc_declarator the CFuncDeclarator of this function
+ # (this is also available through declarator or a
+ # base thereof)
+ # body StatListNode
+ # api boolean
+ # decorators [DecoratorNode] list of decorators
+ #
+ # with_gil boolean Acquire GIL around body
+ # type CFuncType
+ # py_func wrapper for calling from Python
+ # overridable whether or not this is a cpdef function
+ # inline_in_pxd whether this is an inline function in a pxd file
+ # template_declaration String or None Used for c++ class methods
+ # is_const_method whether this is a const method
+ # is_static_method whether this is a static method
+ # is_c_class_method whether this is a cclass method
+
+ child_attrs = ["base_type", "declarator", "body", "py_func_stat"]
+
+ inline_in_pxd = False
+ decorators = None
+ directive_locals = None
+ directive_returns = None
+ override = None
+ template_declaration = None
+ is_const_method = False
+ py_func_stat = None
+
+ def unqualified_name(self):
+ return self.entry.name
+
+ @property
+ def code_object(self):
+ # share the CodeObject with the cpdef wrapper (if available)
+ return self.py_func.code_object if self.py_func else None
+
+ def analyse_declarations(self, env):
+ self.is_c_class_method = env.is_c_class_scope
+ if self.directive_locals is None:
+ self.directive_locals = {}
+ self.directive_locals.update(env.directives.get('locals', {}))
+ if self.directive_returns is not None:
+ base_type = self.directive_returns.analyse_as_type(env)
+ if base_type is None:
+ error(self.directive_returns.pos, "Not a type")
+ base_type = PyrexTypes.error_type
+ else:
+ base_type = self.base_type.analyse(env)
+ self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod')
+ # The 2 here is because we need both function and argument names.
+ if isinstance(self.declarator, CFuncDeclaratorNode):
+ name_declarator, type = self.declarator.analyse(
+ base_type, env, nonempty=2 * (self.body is not None),
+ directive_locals=self.directive_locals, visibility=self.visibility)
+ else:
+ name_declarator, type = self.declarator.analyse(
+ base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
+ if not type.is_cfunction:
+ error(self.pos, "Suite attached to non-function declaration")
+ # Remember the actual type according to the function header
+ # written here, because the type in the symbol table entry
+ # may be different if we're overriding a C method inherited
+ # from the base type of an extension type.
+ self.type = type
+ type.is_overridable = self.overridable
+ declarator = self.declarator
+ while not hasattr(declarator, 'args'):
+ declarator = declarator.base
+
+ self.cfunc_declarator = declarator
+ self.args = declarator.args
+
+ opt_arg_count = self.cfunc_declarator.optional_arg_count
+ if (self.visibility == 'public' or self.api) and opt_arg_count:
+ error(self.cfunc_declarator.pos,
+ "Function with optional arguments may not be declared public or api")
+
+ if type.exception_check == '+' and self.visibility != 'extern':
+ warning(self.cfunc_declarator.pos,
+ "Only extern functions can throw C++ exceptions.")
+
+ for formal_arg, type_arg in zip(self.args, type.args):
+ self.align_argument_type(env, type_arg)
+ formal_arg.type = type_arg.type
+ formal_arg.name = type_arg.name
+ formal_arg.cname = type_arg.cname
+
+ self._validate_type_visibility(type_arg.type, type_arg.pos, env)
+
+ if type_arg.type.is_fused:
+ self.has_fused_arguments = True
+
+ if type_arg.type.is_buffer and 'inline' in self.modifiers:
+ warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
+
+ if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
+ if self.type.nogil:
+ error(formal_arg.pos,
+ "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
+ elif 'inline' in self.modifiers:
+ warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
+
+ self._validate_type_visibility(type.return_type, self.pos, env)
+
+ name = name_declarator.name
+ cname = name_declarator.cname
+
+ type.is_const_method = self.is_const_method
+ type.is_static_method = self.is_static_method
+ self.entry = env.declare_cfunction(
+ name, type, self.pos,
+ cname=cname, visibility=self.visibility, api=self.api,
+ defining=self.body is not None, modifiers=self.modifiers,
+ overridable=self.overridable)
+ self.entry.inline_func_in_pxd = self.inline_in_pxd
+ self.return_type = type.return_type
+ if self.return_type.is_array and self.visibility != 'extern':
+ error(self.pos, "Function cannot return an array")
+ if self.return_type.is_cpp_class:
+ self.return_type.check_nullary_constructor(self.pos, "used as a return value")
+
+ if self.overridable and not env.is_module_scope and not self.is_static_method:
+ if len(self.args) < 1 or not self.args[0].type.is_pyobject:
+ # An error will be produced in the cdef function
+ self.overridable = False
+
+ self.declare_cpdef_wrapper(env)
+ self.create_local_scope(env)
+
+ def declare_cpdef_wrapper(self, env):
+ if self.overridable:
+ if self.is_static_method:
+ # TODO(robertwb): Finish this up, perhaps via more function refactoring.
+ error(self.pos, "static cpdef methods not yet supported")
+ name = self.entry.name
+ py_func_body = self.call_self_node(is_module_scope=env.is_module_scope)
+ if self.is_static_method:
+ from .ExprNodes import NameNode
+ decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name='staticmethod'))]
+ decorators[0].decorator.analyse_types(env)
+ else:
+ decorators = []
+ self.py_func = DefNode(pos=self.pos,
+ name=self.entry.name,
+ args=self.args,
+ star_arg=None,
+ starstar_arg=None,
+ doc=self.doc,
+ body=py_func_body,
+ decorators=decorators,
+ is_wrapper=1)
+ self.py_func.is_module_scope = env.is_module_scope
+ self.py_func.analyse_declarations(env)
+ self.py_func.entry.is_overridable = True
+ self.py_func_stat = StatListNode(self.pos, stats=[self.py_func])
+ self.py_func.type = PyrexTypes.py_object_type
+ self.entry.as_variable = self.py_func.entry
+ self.entry.used = self.entry.as_variable.used = True
+ # Reset the scope entry to the above cfunction
+ env.entries[name] = self.entry
+ if (not self.entry.is_final_cmethod and
+ (not env.is_module_scope or Options.lookup_module_cpdef)):
+ self.override = OverrideCheckNode(self.pos, py_func=self.py_func)
+ self.body = StatListNode(self.pos, stats=[self.override, self.body])
+
+ def _validate_type_visibility(self, type, pos, env):
+ """
+ Ensure that types used in cdef functions are public or api, or
+ defined in a C header.
+ """
+ public_or_api = (self.visibility == 'public' or self.api)
+ entry = getattr(type, 'entry', None)
+ if public_or_api and entry and env.is_module_scope:
+ if not (entry.visibility in ('public', 'extern') or
+ entry.api or entry.in_cinclude):
+ error(pos, "Function declared public or api may not have private types")
+
+ def call_self_node(self, omit_optional_args=0, is_module_scope=0):
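+ # Build the cpdef wrapper body: roughly "return Class.method(self, ...)"
+ # for methods, or "return func(...)" at module scope, with dispatch
+ # skipped where an override check is unnecessary.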
+ from . import ExprNodes
+ args = self.type.args
+ if omit_optional_args:
+ args = args[:len(args) - self.type.optional_arg_count]
+ arg_names = [arg.name for arg in args]
+ if is_module_scope:
+ cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
+ call_arg_names = arg_names
+ skip_dispatch = Options.lookup_module_cpdef
+ elif self.type.is_static_method:
+ class_entry = self.entry.scope.parent_type.entry
+ class_node = ExprNodes.NameNode(self.pos, name=class_entry.name)
+ class_node.entry = class_entry
+ cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name)
+ # Calling static c(p)def methods on an instance is disallowed.
+ # TODO(robertwb): Support by passing self to check for override?
+ skip_dispatch = True
+ else:
+ type_entry = self.type.args[0].type.entry
+ type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name)
+ type_arg.entry = type_entry
+ cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name)
+ skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
+ c_call = ExprNodes.SimpleCallNode(
+ self.pos,
+ function=cfunc,
+ args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names],
+ wrapper_call=skip_dispatch)
+ return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
+
+ def declare_arguments(self, env):
+ for arg in self.type.args:
+ if not arg.name:
+ error(arg.pos, "Missing argument name")
+ self.declare_argument(env, arg)
+
+ def need_gil_acquisition(self, lenv):
+ return self.type.with_gil
+
+ def nogil_check(self, env):
+ type = self.type
+ with_gil = type.with_gil
+ if type.nogil and not with_gil:
+ if type.return_type.is_pyobject:
+ error(self.pos,
+ "Function with Python return type cannot be declared nogil")
+ for entry in self.local_scope.var_entries:
+ if entry.type.is_pyobject and not entry.in_with_gil_block:
+ error(self.pos, "Function declared nogil has Python locals or temporaries")
+
+ def analyse_expressions(self, env):
+ self.local_scope.directives = env.directives
+ if self.py_func_stat is not None:
+ # this will also analyse the default values and the function name assignment
+ self.py_func_stat = self.py_func_stat.analyse_expressions(env)
+ elif self.py_func is not None:
+ # this will also analyse the default values
+ self.py_func = self.py_func.analyse_expressions(env)
+ else:
+ self.analyse_default_values(env)
+ self.analyse_annotations(env)
+ self.acquire_gil = self.need_gil_acquisition(self.local_scope)
+ return self
+
+ def needs_assignment_synthesis(self, env, code=None):
+ return False
+
+ def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None):
+ scope = self.local_scope
+ arg_decls = []
+ type = self.type
+ for arg in type.args[:len(type.args)-type.optional_arg_count]:
+ arg_decl = arg.declaration_code()
+ entry = scope.lookup(arg.name)
+ if not entry.cf_used:
+ arg_decl = 'CYTHON_UNUSED %s' % arg_decl
+ arg_decls.append(arg_decl)
+ if with_dispatch and self.overridable:
+ dispatch_arg = PyrexTypes.c_int_type.declaration_code(
+ Naming.skip_dispatch_cname)
+ if self.override:
+ arg_decls.append(dispatch_arg)
+ else:
+ arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
+ if type.optional_arg_count and with_opt_args:
+ arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ if type.has_varargs:
+ arg_decls.append("...")
+ if not arg_decls:
+ arg_decls = ["void"]
+ if cname is None:
+ cname = self.entry.func_cname
+ entity = type.function_header_code(cname, ', '.join(arg_decls))
+ if self.entry.visibility == 'private' and '::' not in cname:
+ storage_class = "static "
+ else:
+ storage_class = ""
+ dll_linkage = None
+ modifiers = code.build_function_modifiers(self.entry.func_modifiers)
+
+ header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
+ #print (storage_class, modifiers, header)
+ needs_proto = self.is_c_class_method
+ if self.template_declaration:
+ if needs_proto:
+ code.globalstate.parts['module_declarations'].putln(self.template_declaration)
+ code.putln(self.template_declaration)
+ if needs_proto:
+ code.globalstate.parts['module_declarations'].putln(
+ "%s%s%s; /* proto*/" % (storage_class, modifiers, header))
+ code.putln("%s%s%s {" % (storage_class, modifiers, header))
+
+ def generate_argument_declarations(self, env, code):
+ scope = self.local_scope
+ for arg in self.args:
+ if arg.default:
+ entry = scope.lookup(arg.name)
+ if self.override or entry.cf_used:
+ result = arg.calculate_default_value_code(code)
+ code.putln('%s = %s;' % (
+ arg.type.declaration_code(arg.cname), result))
+
+ def generate_keyword_list(self, code):
+ pass
+
+ def generate_argument_parsing_code(self, env, code):
+ i = 0
+ used = 0
+ scope = self.local_scope
+ if self.type.optional_arg_count:
+ code.putln('if (%s) {' % Naming.optional_args_cname)
+ for arg in self.args:
+ if arg.default:
+ entry = scope.lookup(arg.name)
+ if self.override or entry.cf_used:
+ code.putln('if (%s->%sn > %s) {' %
+ (Naming.optional_args_cname,
+ Naming.pyrex_prefix, i))
+ declarator = arg.declarator
+ while not hasattr(declarator, 'name'):
+ declarator = declarator.base
+ code.putln('%s = %s->%s;' %
+ (arg.cname, Naming.optional_args_cname,
+ self.type.opt_arg_cname(declarator.name)))
+ used += 1
+ i += 1
+ for _ in range(used):
+ code.putln('}')
+ code.putln('}')
+
+ # Move arguments into closure if required
+ def put_into_closure(entry):
+ if entry.in_closure and not arg.default:
+ code.putln('%s = %s;' % (entry.cname, entry.original_cname))
+ if entry.type.is_memoryviewslice:
+ code.put_incref_memoryviewslice(entry.cname, have_gil=True)
+ else:
+ code.put_var_incref(entry)
+ code.put_var_giveref(entry)
+ for arg in self.args:
+ put_into_closure(scope.lookup_here(arg.name))
+
+ def generate_argument_conversion_code(self, code):
+ pass
+
+ def generate_argument_type_tests(self, code):
+ # Generate type tests for args whose type in a parent
+ # class is a supertype of the declared type.
+ for arg in self.type.args:
+ if arg.needs_type_test:
+ self.generate_arg_type_test(arg, code)
+ elif arg.type.is_pyobject and not arg.accept_none:
+ self.generate_arg_none_check(arg, code)
+
+ def generate_execution_code(self, code):
+ if code.globalstate.directives['linetrace']:
+ code.mark_pos(self.pos)
+ code.putln("") # generate line tracing code
+ super(CFuncDefNode, self).generate_execution_code(code)
+ if self.py_func_stat:
+ self.py_func_stat.generate_execution_code(code)
+
+ def error_value(self):
+ if self.return_type.is_pyobject:
+ return "0"
+ else:
+ #return None
+ return self.entry.type.exception_value
+
+ def caller_will_check_exceptions(self):
+ return self.entry.type.exception_check
+
+ def generate_wrapper_functions(self, code):
+ # If the C signature of a function has changed, we need to generate
+ # wrappers to put in the slots here.
+ k = 0
+ entry = self.entry
+ func_type = entry.type
+ while entry.prev_entry is not None:
+ k += 1
+ entry = entry.prev_entry
+ entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
+ code.putln()
+ self.generate_function_header(
+ code, 0,
+ with_dispatch=entry.type.is_overridable,
+ with_opt_args=entry.type.optional_arg_count,
+ cname=entry.func_cname)
+ if not self.return_type.is_void:
+ code.put('return ')
+ args = self.type.args
+ arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
+ if entry.type.is_overridable:
+ arglist.append(Naming.skip_dispatch_cname)
+ elif func_type.is_overridable:
+ arglist.append('0')
+ if entry.type.optional_arg_count:
+ arglist.append(Naming.optional_args_cname)
+ elif func_type.optional_arg_count:
+ arglist.append('NULL')
+ code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
+ code.putln('}')
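+ # Illustrative note: each outdated entry gets a small adapter named like
+ # '<func_cname>__pyx_wrap_1' that forwards to the current implementation,
+ # passing 0 for the dispatch flag and NULL for the optional-args struct
+ # that the old signature did not know about - a rough sketch:
+ #   static int f__pyx_wrap_1(int a) { return f(a, 0, NULL); }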
+
+
+class PyArgDeclNode(Node):
+ # Argument which must be a Python object (used
+ # for * and ** arguments).
+ #
+ # name string
+ # entry Symtab.Entry
+ # annotation ExprNode or None Py3 argument annotation
+ child_attrs = []
+ is_self_arg = False
+ is_type_arg = False
+
+ def generate_function_definitions(self, env, code):
+ self.entry.generate_function_definitions(env, code)
+
+
+class DecoratorNode(Node):
+ # A decorator
+ #
+ # decorator NameNode or CallNode or AttributeNode
+ child_attrs = ['decorator']
+
+
+class DefNode(FuncDefNode):
+ # A Python function definition.
+ #
+ # name string the Python name of the function
+ # lambda_name string the internal name of a lambda 'function'
+ # decorators [DecoratorNode] list of decorators
+ # args [CArgDeclNode] formal arguments
+ # doc EncodedString or None
+ # body StatListNode
+ # return_type_annotation
+ # ExprNode or None the Py3 return type annotation
+ #
+ # The following subnode is constructed internally
+ # when the def statement is inside a Python class definition.
+ #
+ # fused_py_func DefNode The original fused cpdef DefNode
+ # (in case this is a specialization)
+ # specialized_cpdefs [DefNode] list of specialized cpdef DefNodes
+ # py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
+ #
+ # decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions
+
+ child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
+ outer_attrs = ["decorators", "return_type_annotation"]
+
+ is_staticmethod = False
+ is_classmethod = False
+
+ lambda_name = None
+ reqd_kw_flags_cname = "0"
+ is_wrapper = 0
+ no_assignment_synthesis = 0
+ decorators = None
+ return_type_annotation = None
+ entry = None
+ acquire_gil = 0
+ self_in_stararg = 0
+ py_cfunc_node = None
+ requires_classobj = False
+ defaults_struct = None # Dynamic kwargs structure name
+ doc = None
+
+ fused_py_func = False
+ specialized_cpdefs = None
+ py_wrapper = None
+ py_wrapper_required = True
+ func_cname = None
+
+ defaults_getter = None
+
+ def __init__(self, pos, **kwds):
+ FuncDefNode.__init__(self, pos, **kwds)
+ k = rk = r = 0
+ for arg in self.args:
+ if arg.kw_only:
+ k += 1
+ if not arg.default:
+ rk += 1
+ if not arg.default:
+ r += 1
+ self.num_kwonly_args = k
+ self.num_required_kw_args = rk
+ self.num_required_args = r
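+ # Worked example (illustrative): for 'def f(a, b=1, *, c, d=2)' this
+ # counts num_kwonly_args = 2 (c, d), num_required_kw_args = 1 (c) and
+ # num_required_args = 2 (a, c).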
+
+ def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, except_val=None, modifiers=None,
+ nogil=False, with_gil=False):
+ if self.star_arg:
+ error(self.star_arg.pos, "cdef function cannot have star argument")
+ if self.starstar_arg:
+ error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
+ exception_value, exception_check = except_val or (None, False)
+
+ if cfunc is None:
+ cfunc_args = []
+ for formal_arg in self.args:
+ name_declarator, type = formal_arg.analyse(scope, nonempty=1)
+ cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
+ cname=None,
+ annotation=formal_arg.annotation,
+ type=py_object_type,
+ pos=formal_arg.pos))
+ cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
+ args=cfunc_args,
+ has_varargs=False,
+ exception_value=None,
+ exception_check=exception_check,
+ nogil=nogil,
+ with_gil=with_gil,
+ is_overridable=overridable)
+ cfunc = CVarDefNode(self.pos, type=cfunc_type)
+ else:
+ if scope is None:
+ scope = cfunc.scope
+ cfunc_type = cfunc.type
+ if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
+ error(self.pos, "wrong number of arguments")
+ error(cfunc.pos, "previous declaration here")
+ for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
+ name_declarator, type = formal_arg.analyse(scope, nonempty=1,
+ is_self_arg=(i == 0 and scope.is_c_class_scope))
+ if type is None or type is PyrexTypes.py_object_type:
+ formal_arg.type = type_arg.type
+ formal_arg.name_declarator = name_declarator
+
+ if exception_value is None and cfunc_type.exception_value is not None:
+ from .ExprNodes import ConstNode
+ exception_value = ConstNode(
+ self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
+ declarator = CFuncDeclaratorNode(self.pos,
+ base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
+ args=self.args,
+ has_varargs=False,
+ exception_check=cfunc_type.exception_check,
+ exception_value=exception_value,
+ with_gil=cfunc_type.with_gil,
+ nogil=cfunc_type.nogil)
+ return CFuncDefNode(self.pos,
+ modifiers=modifiers or [],
+ base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
+ declarator=declarator,
+ body=self.body,
+ doc=self.doc,
+ overridable=cfunc_type.is_overridable,
+ type=cfunc_type,
+ with_gil=cfunc_type.with_gil,
+ nogil=cfunc_type.nogil,
+ visibility='private',
+ api=False,
+ directive_locals=getattr(cfunc, 'directive_locals', {}),
+ directive_returns=returns)
+
+ def is_cdef_func_compatible(self):
+ """Determines if the function's signature is compatible with a
+ cdef function. This can be used before calling
+ .as_cfunction() to see if that will be successful.
+ """
+ if self.needs_closure:
+ return False
+ if self.star_arg or self.starstar_arg:
+ return False
+ return True
+
+ def analyse_declarations(self, env):
+ if self.decorators:
+ for decorator in self.decorators:
+ func = decorator.decorator
+ if func.is_name:
+ self.is_classmethod |= func.name == 'classmethod'
+ self.is_staticmethod |= func.name == 'staticmethod'
+
+ if self.is_classmethod and env.lookup_here('classmethod'):
+ # classmethod() was overridden - not much we can do here ...
+ self.is_classmethod = False
+ if self.is_staticmethod and env.lookup_here('staticmethod'):
+ # staticmethod() was overridden - not much we can do here ...
+ self.is_staticmethod = False
+
+ if self.name == '__new__' and env.is_py_class_scope:
+ self.is_staticmethod = 1
+
+ self.analyse_argument_types(env)
+ if self.name == '<lambda>':
+ self.declare_lambda_function(env)
+ else:
+ self.declare_pyfunction(env)
+
+ self.analyse_signature(env)
+ self.return_type = self.entry.signature.return_type()
+ # if a signature annotation provides a more specific return object type, use it
+ if self.return_type is py_object_type and self.return_type_annotation:
+ if env.directives['annotation_typing'] and not self.entry.is_special:
+ _, return_type = analyse_type_annotation(self.return_type_annotation, env)
+ if return_type and return_type.is_pyobject:
+ self.return_type = return_type
+
+ self.create_local_scope(env)
+
+ self.py_wrapper = DefNodeWrapper(
+ self.pos,
+ target=self,
+ name=self.entry.name,
+ args=self.args,
+ star_arg=self.star_arg,
+ starstar_arg=self.starstar_arg,
+ return_type=self.return_type)
+ self.py_wrapper.analyse_declarations(env)
+
+ def analyse_argument_types(self, env):
+ self.directive_locals = env.directives.get('locals', {})
+ allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
+
+ f2s = env.fused_to_specific
+ env.fused_to_specific = None
+
+ for arg in self.args:
+ if hasattr(arg, 'name'):
+ name_declarator = None
+ else:
+ base_type = arg.base_type.analyse(env)
+ # If we are in Pythran mode and we get a buffer type supported by
+ # Pythran, we change this node to a fused type
+ if has_np_pythran(env) and base_type.is_pythran_expr:
+ base_type = PyrexTypes.FusedType([
+ base_type,
+ #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
+ base_type.org_buffer])
+ name_declarator, type = \
+ arg.declarator.analyse(base_type, env)
+ arg.name = name_declarator.name
+ arg.type = type
+
+ if type.is_fused:
+ self.has_fused_arguments = True
+
+ self.align_argument_type(env, arg)
+ if name_declarator and name_declarator.cname:
+ error(self.pos, "Python function argument cannot have C name specification")
+ arg.type = arg.type.as_argument_type()
+ arg.hdr_type = None
+ arg.needs_conversion = 0
+ arg.needs_type_test = 0
+ arg.is_generic = 1
+ if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
+ if arg.or_none:
+ arg.accept_none = True
+ elif arg.not_none:
+ arg.accept_none = False
+ elif (arg.type.is_extension_type or arg.type.is_builtin_type
+ or arg.type.is_buffer or arg.type.is_memoryviewslice):
+ if arg.default and arg.default.constant_result is None:
+ # special case: def func(MyType obj = None)
+ arg.accept_none = True
+ else:
+ # default depends on compiler directive
+ arg.accept_none = allow_none_for_extension_args
+ else:
+ # probably just a plain 'object'
+ arg.accept_none = True
+ else:
+ arg.accept_none = True # won't be used, but must be there
+ if arg.not_none:
+ error(arg.pos, "Only Python type arguments can have 'not None'")
+ if arg.or_none:
+ error(arg.pos, "Only Python type arguments can have 'or None'")
+ env.fused_to_specific = f2s
+
+ if has_np_pythran(env):
+ self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
+ else:
+ self.np_args_idx = []
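+ # Worked example (illustrative): with the default directive value
+ # 'allow_none_for_extension_args=True', 'def f(MyExt x)' accepts None
+ # unless declared 'not None', 'def f(MyExt x=None)' always accepts None
+ # (the special case above), and a plain 'object' argument always does.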
+
+ def analyse_signature(self, env):
+ if self.entry.is_special:
+ if self.decorators:
+ error(self.pos, "special functions of cdef classes cannot have decorators")
+ self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
+ elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
+ # Use the simpler calling signature for zero- and one-argument functions.
+ if self.entry.signature is TypeSlots.pyfunction_signature:
+ if len(self.args) == 0:
+ self.entry.signature = TypeSlots.pyfunction_noargs
+ elif len(self.args) == 1:
+ if self.args[0].default is None and not self.args[0].kw_only:
+ self.entry.signature = TypeSlots.pyfunction_onearg
+ elif self.entry.signature is TypeSlots.pymethod_signature:
+ if len(self.args) == 1:
+ self.entry.signature = TypeSlots.unaryfunc
+ elif len(self.args) == 2:
+ if self.args[1].default is None and not self.args[1].kw_only:
+ self.entry.signature = TypeSlots.ibinaryfunc
+
+ sig = self.entry.signature
+ nfixed = sig.num_fixed_args()
+ if (sig is TypeSlots.pymethod_signature and nfixed == 1
+ and len(self.args) == 0 and self.star_arg):
+ # this is the only case where a diverging number of
+ # arguments is not an error - when we have no explicit
+ # 'self' parameter as in method(*args)
+ sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
+ self.self_in_stararg = 1
+ nfixed = 0
+
+ if self.is_staticmethod and env.is_c_class_scope:
+ nfixed = 0
+ self.self_in_stararg = True # FIXME: why for staticmethods?
+
+ self.entry.signature = sig = copy.copy(sig)
+ sig.fixed_arg_format = "*"
+ sig.is_staticmethod = True
+ sig.has_generic_args = True
+
+ if ((self.is_classmethod or self.is_staticmethod) and
+ self.has_fused_arguments and env.is_c_class_scope):
+ del self.decorator_indirection.stats[:]
+
+ for i in range(min(nfixed, len(self.args))):
+ arg = self.args[i]
+ arg.is_generic = 0
+ if sig.is_self_arg(i) and not self.is_staticmethod:
+ if self.is_classmethod:
+ arg.is_type_arg = 1
+ arg.hdr_type = arg.type = Builtin.type_type
+ else:
+ arg.is_self_arg = 1
+ arg.hdr_type = arg.type = env.parent_type
+ arg.needs_conversion = 0
+ else:
+ arg.hdr_type = sig.fixed_arg_type(i)
+ if not arg.type.same_as(arg.hdr_type):
+ if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
+ arg.needs_type_test = 1
+ else:
+ arg.needs_conversion = 1
+ if arg.needs_conversion:
+ arg.hdr_cname = Naming.arg_prefix + arg.name
+ else:
+ arg.hdr_cname = Naming.var_prefix + arg.name
+
+ if nfixed > len(self.args):
+ self.bad_signature()
+ return
+ elif nfixed < len(self.args):
+ if not sig.has_generic_args:
+ self.bad_signature()
+ for arg in self.args:
+ if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type):
+ arg.needs_type_test = 1
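+ # Illustrative note: the signature narrowing above means that, e.g., a
+ # module-level 'def f(): ...' uses the no-argument (METH_NOARGS-style)
+ # calling convention and 'def f(x): ...' the one-argument (METH_O-style)
+ # convention, skipping tuple/keyword unpacking entirely; the exact slot
+ # signatures are defined in TypeSlots.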
+
+ def bad_signature(self):
+ sig = self.entry.signature
+ expected_str = "%d" % sig.num_fixed_args()
+ if sig.has_generic_args:
+ expected_str += " or more"
+ name = self.name
+ if name.startswith("__") and name.endswith("__"):
+ desc = "Special method"
+ else:
+ desc = "Method"
+ error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % (
+ desc, self.name, len(self.args), expected_str))
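+ # Example of the resulting message (illustrative): declaring
+ # 'def __len__(self, x)' in a cdef class would report
+ # "Special method __len__ has wrong number of arguments
+ # (2 declared, 1 expected)".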
+
+ def declare_pyfunction(self, env):
+ #print "DefNode.declare_pyfunction:", self.name, "in", env ###
+ name = self.name
+ entry = env.lookup_here(name)
+ if entry:
+ if entry.is_final_cmethod and not env.parent_type.is_final_type:
+ error(self.pos, "Only final types can have final Python (def/cpdef) methods")
+ if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper:
+ warning(self.pos, "Overriding cdef method with def method.", 5)
+ entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
+ self.entry = entry
+ prefix = env.next_id(env.scope_prefix)
+ self.entry.pyfunc_cname = Naming.pyfunc_prefix + prefix + name
+ if Options.docstrings:
+ entry.doc = embed_position(self.pos, self.doc)
+ entry.doc_cname = Naming.funcdoc_prefix + prefix + name
+ if entry.is_special:
+ if entry.name in TypeSlots.invisible or not entry.doc or (
+ entry.name in '__getattr__' and env.directives['fast_getattr']):
+ entry.wrapperbase_cname = None
+ else:
+ entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
+ else:
+ entry.doc = None
+
+ def declare_lambda_function(self, env):
+ entry = env.declare_lambda_function(self.lambda_name, self.pos)
+ entry.doc = None
+ self.entry = entry
+ self.entry.pyfunc_cname = entry.cname
+
+ def declare_arguments(self, env):
+ for arg in self.args:
+ if not arg.name:
+ error(arg.pos, "Missing argument name")
+ if arg.needs_conversion:
+ arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
+ if arg.type.is_pyobject:
+ arg.entry.init = "0"
+ else:
+ arg.entry = self.declare_argument(env, arg)
+ arg.entry.is_arg = 1
+ arg.entry.used = 1
+ arg.entry.is_self_arg = arg.is_self_arg
+ self.declare_python_arg(env, self.star_arg)
+ self.declare_python_arg(env, self.starstar_arg)
+
+ def declare_python_arg(self, env, arg):
+ if arg:
+ if env.directives['infer_types'] != False:
+ type = PyrexTypes.unspecified_type
+ else:
+ type = py_object_type
+ entry = env.declare_var(arg.name, type, arg.pos)
+ entry.is_arg = 1
+ entry.used = 1
+ entry.init = "0"
+ entry.xdecref_cleanup = 1
+ arg.entry = entry
+
+ def analyse_expressions(self, env):
+ self.local_scope.directives = env.directives
+ self.analyse_default_values(env)
+ self.analyse_annotations(env)
+ if self.return_type_annotation:
+ self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
+
+ if not self.needs_assignment_synthesis(env) and self.decorators:
+ for decorator in self.decorators[::-1]:
+ decorator.decorator = decorator.decorator.analyse_expressions(env)
+
+ self.py_wrapper.prepare_argument_coercion(env)
+ return self
+
+ def needs_assignment_synthesis(self, env, code=None):
+ if self.is_staticmethod:
+ return True
+ if self.specialized_cpdefs or self.entry.is_fused_specialized:
+ return False
+ if self.no_assignment_synthesis:
+ return False
+ if self.entry.is_special:
+ return False
+ if self.entry.is_anonymous:
+ return True
+ if env.is_module_scope or env.is_c_class_scope:
+ if code is None:
+ return self.local_scope.directives['binding']
+ else:
+ return code.globalstate.directives['binding']
+ return env.is_py_class_scope or env.is_closure_scope
+
+ def error_value(self):
+ return self.entry.signature.error_value
+
+ def caller_will_check_exceptions(self):
+ return self.entry.signature.exception_check
+
+ def generate_function_definitions(self, env, code):
+ if self.defaults_getter:
+ # the defaults getter must never live in class scopes; it is always a module function
+ self.defaults_getter.generate_function_definitions(env.global_scope(), code)
+
+ # Before closure cnames are mangled
+ if self.py_wrapper_required:
+ # func_cname might be modified by @cname
+ self.py_wrapper.func_cname = self.entry.func_cname
+ self.py_wrapper.generate_function_definitions(env, code)
+ FuncDefNode.generate_function_definitions(self, env, code)
+
+ def generate_function_header(self, code, with_pymethdef, proto_only=0):
+ if proto_only:
+ if self.py_wrapper_required:
+ self.py_wrapper.generate_function_header(
+ code, with_pymethdef, True)
+ return
+ arg_code_list = []
+ if self.entry.signature.has_dummy_arg:
+ self_arg = 'PyObject *%s' % Naming.self_cname
+ if not self.needs_outer_scope:
+ self_arg = 'CYTHON_UNUSED ' + self_arg
+ arg_code_list.append(self_arg)
+
+ def arg_decl_code(arg):
+ entry = arg.entry
+ if entry.in_closure:
+ cname = entry.original_cname
+ else:
+ cname = entry.cname
+ decl = entry.type.declaration_code(cname)
+ if not entry.cf_used:
+ decl = 'CYTHON_UNUSED ' + decl
+ return decl
+
+ for arg in self.args:
+ arg_code_list.append(arg_decl_code(arg))
+ if self.star_arg:
+ arg_code_list.append(arg_decl_code(self.star_arg))
+ if self.starstar_arg:
+ arg_code_list.append(arg_decl_code(self.starstar_arg))
+ if arg_code_list:
+ arg_code = ', '.join(arg_code_list)
+ else:
+ arg_code = 'void' # No arguments
+ dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
+
+ decls_code = code.globalstate['decls']
+ preprocessor_guard = self.get_preprocessor_guard()
+ if preprocessor_guard:
+ decls_code.putln(preprocessor_guard)
+ decls_code.putln(
+ "static %s(%s); /* proto */" % (dc, arg_code))
+ if preprocessor_guard:
+ decls_code.putln("#endif")
+ code.putln("static %s(%s) {" % (dc, arg_code))
+
+ def generate_argument_declarations(self, env, code):
+ pass
+
+ def generate_keyword_list(self, code):
+ pass
+
+ def generate_argument_parsing_code(self, env, code):
+ # Move arguments into closure if required
+ def put_into_closure(entry):
+ if entry.in_closure:
+ code.putln('%s = %s;' % (entry.cname, entry.original_cname))
+ if entry.xdecref_cleanup:
+ # mostly applies to the starstar arg - this can sometimes be NULL
+ # so must be xincrefed instead
+ code.put_var_xincref(entry)
+ code.put_var_xgiveref(entry)
+ else:
+ code.put_var_incref(entry)
+ code.put_var_giveref(entry)
+ for arg in self.args:
+ put_into_closure(arg.entry)
+ for arg in self.star_arg, self.starstar_arg:
+ if arg:
+ put_into_closure(arg.entry)
+
+ def generate_argument_type_tests(self, code):
+ pass
+
+
+class DefNodeWrapper(FuncDefNode):
+ # DefNode python wrapper code generator
+
+ defnode = None
+ target = None # Target DefNode
+
+ def __init__(self, *args, **kwargs):
+ FuncDefNode.__init__(self, *args, **kwargs)
+ self.num_kwonly_args = self.target.num_kwonly_args
+ self.num_required_kw_args = self.target.num_required_kw_args
+ self.num_required_args = self.target.num_required_args
+ self.self_in_stararg = self.target.self_in_stararg
+ self.signature = None
+
+ def analyse_declarations(self, env):
+ target_entry = self.target.entry
+ name = self.name
+ prefix = env.next_id(env.scope_prefix)
+ target_entry.func_cname = Naming.pywrap_prefix + prefix + name
+ target_entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
+
+ self.signature = target_entry.signature
+
+ self.np_args_idx = self.target.np_args_idx
+
+ def prepare_argument_coercion(self, env):
+ # This is only really required for Cython utility code at this time;
+ # everything else can be done during code generation. But we expand
+ # all utility code here, simply because we cannot easily distinguish
+ # different code types.
+ for arg in self.args:
+ if not arg.type.is_pyobject:
+ if not arg.type.create_from_py_utility_code(env):
+ pass # will fail later
+ elif arg.hdr_type and not arg.hdr_type.is_pyobject:
+ if not arg.hdr_type.create_to_py_utility_code(env):
+ pass # will fail later
+
+ if self.starstar_arg and not self.starstar_arg.entry.cf_used:
+ # we will set the kwargs argument to NULL instead of a new dict
+ # and must therefore correct the control flow state
+ entry = self.starstar_arg.entry
+ entry.xdecref_cleanup = 1
+ for ass in entry.cf_assignments:
+ if not ass.is_arg and ass.lhs.is_name:
+ ass.lhs.cf_maybe_null = True
+
+ def signature_has_nongeneric_args(self):
+ argcount = len(self.args)
+ if argcount == 0 or (
+ argcount == 1 and (self.args[0].is_self_arg or
+ self.args[0].is_type_arg)):
+ return 0
+ return 1
+
+ def signature_has_generic_args(self):
+ return self.signature.has_generic_args
+
+ def generate_function_body(self, code):
+ args = []
+ if self.signature.has_dummy_arg:
+ args.append(Naming.self_cname)
+ for arg in self.args:
+ if arg.hdr_type and not (arg.type.is_memoryviewslice or
+ arg.type.is_struct or
+ arg.type.is_complex):
+ args.append(arg.type.cast_code(arg.entry.cname))
+ else:
+ args.append(arg.entry.cname)
+ if self.star_arg:
+ args.append(self.star_arg.entry.cname)
+ if self.starstar_arg:
+ args.append(self.starstar_arg.entry.cname)
+ args = ', '.join(args)
+ if not self.return_type.is_void:
+ code.put('%s = ' % Naming.retval_cname)
+ code.putln('%s(%s);' % (
+ self.target.entry.pyfunc_cname, args))
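+ # Illustrative sketch: the wrapper body generated above boils down to a
+ # single forwarding call such as
+ #   __pyx_r = __pyx_pf_3mod_f(__pyx_self, __pyx_v_x);
+ # with casts inserted for non-object argument types.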
+
+ def generate_function_definitions(self, env, code):
+ lenv = self.target.local_scope
+ # Generate C code for header and body of function
+ code.mark_pos(self.pos)
+ code.putln("")
+ code.putln("/* Python wrapper */")
+ preprocessor_guard = self.target.get_preprocessor_guard()
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+
+ code.enter_cfunc_scope(lenv)
+ code.return_from_error_cleanup_label = code.new_label()
+
+ with_pymethdef = (self.target.needs_assignment_synthesis(env, code) or
+ self.target.pymethdef_required)
+ self.generate_function_header(code, with_pymethdef)
+ self.generate_argument_declarations(lenv, code)
+ tempvardecl_code = code.insertion_point()
+
+ if self.return_type.is_pyobject:
+ retval_init = ' = 0'
+ else:
+ retval_init = ''
+ if not self.return_type.is_void:
+ code.putln('%s%s;' % (
+ self.return_type.declaration_code(Naming.retval_cname),
+ retval_init))
+ code.put_declare_refcount_context()
+ code.put_setup_refcount_context('%s (wrapper)' % self.name)
+
+ self.generate_argument_parsing_code(lenv, code)
+ self.generate_argument_type_tests(code)
+ self.generate_function_body(code)
+
+ # ----- Go back and insert temp variable declarations
+ tempvardecl_code.put_temp_declarations(code.funcstate)
+
+ code.mark_pos(self.pos)
+ code.putln("")
+ code.putln("/* function exit code */")
+
+ # ----- Error cleanup
+ if code.error_label in code.labels_used:
+ code.put_goto(code.return_label)
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ err_val = self.error_value()
+ if err_val is not None:
+ code.putln("%s = %s;" % (Naming.retval_cname, err_val))
+
+ # ----- Non-error return cleanup
+ code.put_label(code.return_label)
+ for entry in lenv.var_entries:
+ if entry.is_arg and entry.type.is_pyobject:
+ code.put_var_decref(entry)
+
+ code.put_finish_refcount_context()
+ if not self.return_type.is_void:
+ code.putln("return %s;" % Naming.retval_cname)
+ code.putln('}')
+ code.exit_cfunc_scope()
+ if preprocessor_guard:
+ code.putln("#endif /*!(%s)*/" % preprocessor_guard)
+
+ def generate_function_header(self, code, with_pymethdef, proto_only=0):
+ arg_code_list = []
+ sig = self.signature
+
+ if sig.has_dummy_arg or self.self_in_stararg:
+ arg_code = "PyObject *%s" % Naming.self_cname
+ if not sig.has_dummy_arg:
+ arg_code = 'CYTHON_UNUSED ' + arg_code
+ arg_code_list.append(arg_code)
+
+ for arg in self.args:
+ if not arg.is_generic:
+ if arg.is_self_arg or arg.is_type_arg:
+ arg_code_list.append("PyObject *%s" % arg.hdr_cname)
+ else:
+ arg_code_list.append(
+ arg.hdr_type.declaration_code(arg.hdr_cname))
+ entry = self.target.entry
+ if not entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
+ arg_code_list.append("CYTHON_UNUSED PyObject *unused")
+ if entry.scope.is_c_class_scope and entry.name == "__ipow__":
+ arg_code_list.append("CYTHON_UNUSED PyObject *unused")
+ if sig.has_generic_args:
+ arg_code_list.append(
+ "PyObject *%s, PyObject *%s" % (
+ Naming.args_cname, Naming.kwds_cname))
+ arg_code = ", ".join(arg_code_list)
+
+ # Prevent warning: unused function '__pyx_pw_5numpy_7ndarray_1__getbuffer__'
+ mf = ""
+ if (entry.name in ("__getbuffer__", "__releasebuffer__")
+ and entry.scope.is_c_class_scope):
+ mf = "CYTHON_UNUSED "
+ with_pymethdef = False
+
+ dc = self.return_type.declaration_code(entry.func_cname)
+ header = "static %s%s(%s)" % (mf, dc, arg_code)
+ code.putln("%s; /*proto*/" % header)
+
+ if proto_only:
+ if self.target.fused_py_func:
+ # If we are the specialized version of the cpdef, we still
+ # want the prototype for the "fused cpdef", in case we're
+ # checking to see if our method was overridden in Python
+ self.target.fused_py_func.generate_function_header(
+ code, with_pymethdef, proto_only=True)
+ return
+
+ if (Options.docstrings and entry.doc and
+ not self.target.fused_py_func and
+ not entry.scope.is_property_scope and
+ (not entry.is_special or entry.wrapperbase_cname)):
+ # h_code = code.globalstate['h_code']
+ docstr = entry.doc
+
+ if docstr.is_unicode:
+ docstr = docstr.as_utf8_string()
+
+ if not (entry.is_special and entry.name in ('__getbuffer__', '__releasebuffer__')):
+ code.putln('static char %s[] = %s;' % (
+ entry.doc_cname,
+ docstr.as_c_string_literal()))
+
+ if entry.is_special:
+ code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC')
+ code.putln(
+ "struct wrapperbase %s;" % entry.wrapperbase_cname)
+ code.putln('#endif')
+
+ if with_pymethdef or self.target.fused_py_func:
+ code.put(
+ "static PyMethodDef %s = " % entry.pymethdef_cname)
+ code.put_pymethoddef(self.target.entry, ";", allow_skip=False)
+ code.putln("%s {" % header)
+
+ def generate_argument_declarations(self, env, code):
+ for arg in self.args:
+ if arg.is_generic:
+ if arg.needs_conversion:
+ code.putln("PyObject *%s = 0;" % arg.hdr_cname)
+ else:
+ code.put_var_declaration(arg.entry)
+ for entry in env.var_entries:
+ if entry.is_arg:
+ code.put_var_declaration(entry)
+
+ def generate_argument_parsing_code(self, env, code):
+ # Generate fast equivalent of PyArg_ParseTuple call for
+ # generic arguments, if any, including args/kwargs
+ old_error_label = code.new_error_label()
+ our_error_label = code.error_label
+ end_label = code.new_label("argument_unpacking_done")
+
+ has_kwonly_args = self.num_kwonly_args > 0
+ has_star_or_kw_args = self.star_arg is not None \
+ or self.starstar_arg is not None or has_kwonly_args
+
+ for arg in self.args:
+ if not arg.type.is_pyobject:
+ if not arg.type.create_from_py_utility_code(env):
+ pass # will fail later
+
+ if not self.signature_has_generic_args():
+ if has_star_or_kw_args:
+ error(self.pos, "This method cannot have * or keyword arguments")
+ self.generate_argument_conversion_code(code)
+
+ elif not self.signature_has_nongeneric_args():
+ # func(*args) or func(**kw) or func(*args, **kw)
+ self.generate_stararg_copy_code(code)
+
+ else:
+ self.generate_tuple_and_keyword_parsing_code(self.args, end_label, code)
+
+ code.error_label = old_error_label
+ if code.label_used(our_error_label):
+ if not code.label_used(end_label):
+ code.put_goto(end_label)
+ code.put_label(our_error_label)
+ if has_star_or_kw_args:
+ self.generate_arg_decref(self.star_arg, code)
+ if self.starstar_arg:
+ if self.starstar_arg.entry.xdecref_cleanup:
+ code.put_var_xdecref_clear(self.starstar_arg.entry)
+ else:
+ code.put_var_decref_clear(self.starstar_arg.entry)
+ code.put_add_traceback(self.target.entry.qualified_name)
+ code.put_finish_refcount_context()
+ code.putln("return %s;" % self.error_value())
+ if code.label_used(end_label):
+ code.put_label(end_label)
+
+ def generate_arg_xdecref(self, arg, code):
+ if arg:
+ code.put_var_xdecref_clear(arg.entry)
+
+ def generate_arg_decref(self, arg, code):
+ if arg:
+ code.put_var_decref_clear(arg.entry)
+
+ def generate_stararg_copy_code(self, code):
+ if not self.star_arg:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
+ code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
+ Naming.args_cname)
+ code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
+ self.name, Naming.args_cname, self.error_value()))
+ code.putln("}")
+
+ if self.starstar_arg:
+ if self.star_arg or not self.starstar_arg.entry.cf_used:
+ kwarg_check = "unlikely(%s)" % Naming.kwds_cname
+ else:
+ kwarg_check = "%s" % Naming.kwds_cname
+ else:
+ kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
+ Naming.kwds_cname, Naming.kwds_cname)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("KeywordStringCheck", "FunctionArguments.c"))
+ code.putln(
+ "if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
+ kwarg_check, Naming.kwds_cname, self.name,
+ bool(self.starstar_arg), self.error_value()))
+
+ if self.starstar_arg and self.starstar_arg.entry.cf_used:
+ if all(ref.node.allow_null for ref in self.starstar_arg.entry.cf_references):
+ code.putln("if (%s) {" % kwarg_check)
+ code.putln("%s = PyDict_Copy(%s); if (unlikely(!%s)) return %s;" % (
+ self.starstar_arg.entry.cname,
+ Naming.kwds_cname,
+ self.starstar_arg.entry.cname,
+ self.error_value()))
+ code.put_gotref(self.starstar_arg.entry.cname)
+ code.putln("} else {")
+ code.putln("%s = NULL;" % (self.starstar_arg.entry.cname,))
+ code.putln("}")
+ self.starstar_arg.entry.xdecref_cleanup = 1
+ else:
+ code.put("%s = (%s) ? PyDict_Copy(%s) : PyDict_New(); " % (
+ self.starstar_arg.entry.cname,
+ Naming.kwds_cname,
+ Naming.kwds_cname))
+ code.putln("if (unlikely(!%s)) return %s;" % (
+ self.starstar_arg.entry.cname, self.error_value()))
+ self.starstar_arg.entry.xdecref_cleanup = 0
+ code.put_gotref(self.starstar_arg.entry.cname)
+
+ if self.self_in_stararg and not self.target.is_staticmethod:
+ # need to create a new tuple with 'self' inserted as first item
+ code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
+ self.star_arg.entry.cname,
+ Naming.args_cname,
+ self.star_arg.entry.cname))
+ if self.starstar_arg and self.starstar_arg.entry.cf_used:
+ code.putln("{")
+ code.put_xdecref_clear(self.starstar_arg.entry.cname, py_object_type)
+ code.putln("return %s;" % self.error_value())
+ code.putln("}")
+ else:
+ code.putln("return %s;" % self.error_value())
+ code.put_gotref(self.star_arg.entry.cname)
+ code.put_incref(Naming.self_cname, py_object_type)
+ code.put_giveref(Naming.self_cname)
+ code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
+ self.star_arg.entry.cname, Naming.self_cname))
+ temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
+ temp, temp, Naming.args_cname, temp))
+ code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
+ Naming.args_cname, temp))
+ code.put_incref("item", py_object_type)
+ code.put_giveref("item")
+ code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
+ self.star_arg.entry.cname, temp))
+ code.putln("}")
+ code.funcstate.release_temp(temp)
+ self.star_arg.entry.xdecref_cleanup = 0
+ elif self.star_arg:
+ code.put_incref(Naming.args_cname, py_object_type)
+ code.putln("%s = %s;" % (
+ self.star_arg.entry.cname,
+ Naming.args_cname))
+ self.star_arg.entry.xdecref_cleanup = 0
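+ # Illustrative sketch (assumed cnames): for 'def f(*args, **kwargs)' the
+ # code above emits roughly
+ #   __pyx_v_kwargs = (__pyx_kwds) ? PyDict_Copy(__pyx_kwds) : PyDict_New();
+ #   if (unlikely(!__pyx_v_kwargs)) return NULL;
+ #   Py_INCREF(__pyx_args); __pyx_v_args = __pyx_args;
+ # i.e. keyword args are copied into a fresh dict while the positional
+ # args tuple is simply reused.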
+
+ def generate_tuple_and_keyword_parsing_code(self, args, success_label, code):
+ argtuple_error_label = code.new_label("argtuple_error")
+
+ positional_args = []
+ required_kw_only_args = []
+ optional_kw_only_args = []
+ for arg in args:
+ if arg.is_generic:
+ if arg.default:
+ if not arg.is_self_arg and not arg.is_type_arg:
+ if arg.kw_only:
+ optional_kw_only_args.append(arg)
+ else:
+ positional_args.append(arg)
+ elif arg.kw_only:
+ required_kw_only_args.append(arg)
+ elif not arg.is_self_arg and not arg.is_type_arg:
+ positional_args.append(arg)
+
+ # sort required kw-only args before optional ones to avoid special
+ # cases in the unpacking code
+ kw_only_args = required_kw_only_args + optional_kw_only_args
+
+ min_positional_args = self.num_required_args - self.num_required_kw_args
+ if len(args) > 0 and (args[0].is_self_arg or args[0].is_type_arg):
+ min_positional_args -= 1
+ max_positional_args = len(positional_args)
+ has_fixed_positional_count = not self.star_arg and \
+ min_positional_args == max_positional_args
+ has_kw_only_args = bool(kw_only_args)
+
+ if self.starstar_arg or self.star_arg:
+ self.generate_stararg_init_code(max_positional_args, code)
+
+ code.putln('{')
+ all_args = tuple(positional_args) + tuple(kw_only_args)
+ code.putln("static PyObject **%s[] = {%s,0};" % (
+ Naming.pykwdlist_cname,
+ ','.join(['&%s' % code.intern_identifier(arg.name)
+ for arg in all_args])))
+
+ # Before being converted and assigned to the target variables,
+ # borrowed references to all unpacked argument values are
+ # collected into a local PyObject* array called "values",
+ # regardless of whether they were taken from default arguments,
+ # positional arguments or keyword arguments. Note that
+ # C-typed default arguments are handled at conversion time,
+ # so their array value is NULL in the end if no argument
+ # was passed for them.
+ self.generate_argument_values_setup_code(all_args, code)
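+ # Illustrative example: for 'def f(a, b=1, *, c=2)' the values[] array
+ # has three slots; values[1] and values[2] are pre-filled with borrowed
+ # references to the default objects, and any received positional or
+ # keyword value simply overwrites its slot.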
+
+ # --- optimised code when we receive keyword arguments
+ code.putln("if (%s(%s)) {" % (
+ (self.num_required_kw_args > 0) and "likely" or "unlikely",
+ Naming.kwds_cname))
+ self.generate_keyword_unpacking_code(
+ min_positional_args, max_positional_args,
+ has_fixed_positional_count, has_kw_only_args,
+ all_args, argtuple_error_label, code)
+
+ # --- optimised code when we do not receive any keyword arguments
+ if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
+ # Python raises arg tuple related errors first, so we must
+ # check the length here
+ if min_positional_args == max_positional_args and not self.star_arg:
+ compare = '!='
+ else:
+ compare = '<'
+ code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
+ Naming.args_cname, compare, min_positional_args))
+ code.put_goto(argtuple_error_label)
+
+ if self.num_required_kw_args:
+ # pure error case: keywords required but not passed
+ if max_positional_args > min_positional_args and not self.star_arg:
+ code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
+ Naming.args_cname, max_positional_args))
+ code.put_goto(argtuple_error_label)
+ code.putln('} else {')
+ for i, arg in enumerate(kw_only_args):
+ if not arg.default:
+ pystring_cname = code.intern_identifier(arg.name)
+ # required keyword-only argument missing
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
+ self.name,
+ pystring_cname))
+ code.putln(code.error_goto(self.pos))
+ break
+
+ else:
+ # optimised tuple unpacking code
+ code.putln('} else {')
+ if min_positional_args == max_positional_args:
+ # parse the exact number of positional arguments from
+ # the args tuple
+ for i, arg in enumerate(positional_args):
+ code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
+ else:
+ # parse the positional arguments from the variable length
+ # args tuple and reject illegal argument tuple sizes
+ code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
+ if self.star_arg:
+ code.putln('default:')
+ reversed_args = list(enumerate(positional_args))[::-1]
+ for i, arg in reversed_args:
+ if i >= min_positional_args-1:
+ if i != reversed_args[0][0]:
+ code.putln('CYTHON_FALLTHROUGH;')
+ code.put('case %2d: ' % (i+1))
+ code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
+ if min_positional_args == 0:
+ code.putln('CYTHON_FALLTHROUGH;')
+ code.put('case 0: ')
+ code.putln('break;')
+ if self.star_arg:
+ if min_positional_args:
+ for i in range(min_positional_args-1, -1, -1):
+ code.putln('case %2d:' % i)
+ code.put_goto(argtuple_error_label)
+ else:
+ code.put('default: ')
+ code.put_goto(argtuple_error_label)
+ code.putln('}')
+
+ code.putln('}') # end of the conditional unpacking blocks
+
+ # Convert arg values to their final type and assign them.
+ # Also inject non-Python default arguments, which cannot
+ # live in the values[] array.
+ for i, arg in enumerate(all_args):
+ self.generate_arg_assignment(arg, "values[%d]" % i, code)
+
+ code.putln('}') # end of the whole argument unpacking block
+
+ if code.label_used(argtuple_error_label):
+ code.put_goto(success_label)
+ code.put_label(argtuple_error_label)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
+ code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
+ self.name, has_fixed_positional_count,
+ min_positional_args, max_positional_args,
+ Naming.args_cname))
+ code.putln(code.error_goto(self.pos))
+
+ def generate_arg_assignment(self, arg, item, code):
+ if arg.type.is_pyobject:
+ # Python default arguments were already stored in 'item' at the very beginning
+ if arg.is_generic:
+ item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
+ entry = arg.entry
+ code.putln("%s = %s;" % (entry.cname, item))
+ else:
+ if arg.type.from_py_function:
+ if arg.default:
+ # C-typed default arguments must be handled here
+ code.putln('if (%s) {' % item)
+ code.putln(arg.type.from_py_call_code(
+ item, arg.entry.cname, arg.pos, code))
+ if arg.default:
+ code.putln('} else {')
+ code.putln("%s = %s;" % (
+ arg.entry.cname,
+ arg.calculate_default_value_code(code)))
+ if arg.type.is_memoryviewslice:
+ code.put_incref_memoryviewslice(arg.entry.cname,
+ have_gil=True)
+ code.putln('}')
+ else:
+ error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
+
+ def generate_stararg_init_code(self, max_positional_args, code):
+ if self.starstar_arg:
+ self.starstar_arg.entry.xdecref_cleanup = 0
+ code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
+ self.starstar_arg.entry.cname,
+ self.starstar_arg.entry.cname,
+ self.error_value()))
+ code.put_gotref(self.starstar_arg.entry.cname)
+ if self.star_arg:
+ self.star_arg.entry.xdecref_cleanup = 0
+ code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
+ Naming.args_cname,
+ max_positional_args))
+ code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
+ self.star_arg.entry.cname, Naming.args_cname,
+ max_positional_args, Naming.args_cname))
+ code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
+ if self.starstar_arg:
+ code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
+ code.put_finish_refcount_context()
+ code.putln('return %s;' % self.error_value())
+ code.putln('}')
+ code.put_gotref(self.star_arg.entry.cname)
+ code.putln('} else {')
+ code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
+ code.put_incref(Naming.empty_tuple, py_object_type)
+ code.putln('}')
+
+ def generate_argument_values_setup_code(self, args, code):
+ max_args = len(args)
+ # the 'values' array collects borrowed references to arguments
+ # before doing any type coercion etc.
+ code.putln("PyObject* values[%d] = {%s};" % (
+ max_args, ','.join('0'*max_args)))
+
+ if self.target.defaults_struct:
+ code.putln('%s *%s = __Pyx_CyFunction_Defaults(%s, %s);' % (
+ self.target.defaults_struct, Naming.dynamic_args_cname,
+ self.target.defaults_struct, Naming.self_cname))
+
+ # assign borrowed Python default values to the values array,
+ # so that they can be overwritten by received arguments below
+ for i, arg in enumerate(args):
+ if arg.default and arg.type.is_pyobject:
+ default_value = arg.calculate_default_value_code(code)
+ code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
+
+ def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
+ has_fixed_positional_count, has_kw_only_args,
+ all_args, argtuple_error_label, code):
+ code.putln('Py_ssize_t kw_args;')
+ code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
+ # copy the values from the args tuple and check that it's not too long
+ code.putln('switch (pos_args) {')
+ if self.star_arg:
+ code.putln('default:')
+ for i in range(max_positional_args-1, -1, -1):
+ code.put('case %2d: ' % (i+1))
+ code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
+ i, Naming.args_cname, i))
+ code.putln('CYTHON_FALLTHROUGH;')
+ code.putln('case 0: break;')
+ if not self.star_arg:
+ code.put('default: ') # more arguments than allowed
+ code.put_goto(argtuple_error_label)
+ code.putln('}')
+
+ # The code above is very often (but not always) the same as
+ # the optimised non-kwargs tuple unpacking code, so we keep
+ # the code block above at the very top, before the following
+ # 'external' PyDict_Size() call, to make it easy for the C
+ # compiler to merge the two separate tuple unpacking
+ # implementations into one when they turn out to be identical.
+
+ # If we received kwargs, fill up the positional/required
+ # arguments with values from the kw dict
+ code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
+ if self.num_required_args or max_positional_args > 0:
+ last_required_arg = -1
+ for i, arg in enumerate(all_args):
+ if not arg.default:
+ last_required_arg = i
+ if last_required_arg < max_positional_args:
+ last_required_arg = max_positional_args-1
+ if max_positional_args > 0:
+ code.putln('switch (pos_args) {')
+ for i, arg in enumerate(all_args[:last_required_arg+1]):
+ if max_positional_args > 0 and i <= max_positional_args:
+ if i != 0:
+ code.putln('CYTHON_FALLTHROUGH;')
+ if self.star_arg and i == max_positional_args:
+ code.putln('default:')
+ else:
+ code.putln('case %2d:' % i)
+ pystring_cname = code.intern_identifier(arg.name)
+ if arg.default:
+ if arg.kw_only:
+ # optional kw-only args are handled separately below
+ continue
+ code.putln('if (kw_args > 0) {')
+ # don't overwrite default argument
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
+ Naming.kwds_cname, pystring_cname))
+ code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
+ code.putln('}')
+ else:
+ code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
+ i, Naming.kwds_cname, pystring_cname))
+ if i < min_positional_args:
+ if i == 0:
+ # special case: we know arg 0 is missing
+ code.put('else ')
+ code.put_goto(argtuple_error_label)
+ else:
+ # report the correct number of values (args or
+ # kwargs) that were passed for positional
+ # arguments up to this point
+ code.putln('else {')
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseArgTupleInvalid", "FunctionArguments.c"))
+ code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
+ self.name, has_fixed_positional_count,
+ min_positional_args, max_positional_args, i))
+ code.putln(code.error_goto(self.pos))
+ code.putln('}')
+ elif arg.kw_only:
+ code.putln('else {')
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
+ self.name, pystring_cname))
+ code.putln(code.error_goto(self.pos))
+ code.putln('}')
+ if max_positional_args > 0:
+ code.putln('}')
+
+ if has_kw_only_args:
+ # unpack optional keyword-only arguments separately because
+ # checking for interned strings in a dict is faster than iterating
+ self.generate_optional_kwonly_args_unpacking_code(all_args, code)
+
+ code.putln('if (unlikely(kw_args > 0)) {')
+ # non-positional/-required kw args left in dict: default args,
+ # kw-only args, **kwargs or error
+ #
+ # This is sort of a catch-all: except for checking required
+ # arguments, this will always do the right thing for unpacking
+ # keyword arguments, so that we can concentrate on optimising
+ # common cases above.
+ if max_positional_args == 0:
+ pos_arg_count = "0"
+ elif self.star_arg:
+ code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
+ max_positional_args, max_positional_args))
+ pos_arg_count = "used_pos_args"
+ else:
+ pos_arg_count = "pos_args"
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ParseKeywords", "FunctionArguments.c"))
+ code.putln('if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
+ Naming.kwds_cname,
+ Naming.pykwdlist_cname,
+ self.starstar_arg and self.starstar_arg.entry.cname or '0',
+ pos_arg_count,
+ self.name,
+ code.error_goto(self.pos)))
+ code.putln('}')
+
+ def generate_optional_kwonly_args_unpacking_code(self, all_args, code):
+ optional_args = []
+ first_optional_arg = -1
+ for i, arg in enumerate(all_args):
+ if not arg.kw_only or not arg.default:
+ continue
+ if not optional_args:
+ first_optional_arg = i
+ optional_args.append(arg.name)
+ if optional_args:
+ if len(optional_args) > 1:
+ # if we receive more keyword args than there are optional named
+ # kwargs, we either have **kwargs (in which case we must iterate
+ # anyway) or it's an error (which we also handle during iteration)
+ # => skip this part if there are more
+ code.putln('if (kw_args > 0 && %s(kw_args <= %d)) {' % (
+ not self.starstar_arg and 'likely' or '',
+ len(optional_args)))
+ code.putln('Py_ssize_t index;')
+ # not unrolling the loop here reduces the C code overhead
+ code.putln('for (index = %d; index < %d && kw_args > 0; index++) {' % (
+ first_optional_arg, first_optional_arg + len(optional_args)))
+ else:
+ code.putln('if (kw_args == 1) {')
+ code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
+ Naming.kwds_cname, Naming.pykwdlist_cname))
+ code.putln('if (value) { values[index] = value; kw_args--; }')
+ if len(optional_args) > 1:
+ code.putln('}')
+ code.putln('}')
+
+ def generate_argument_conversion_code(self, code):
+ # Generate code to convert arguments from signature type to
+ # declared type, if needed. Also copies signature arguments
+ # into closure fields.
+ for arg in self.args:
+ if arg.needs_conversion:
+ self.generate_arg_conversion(arg, code)
+
+ def generate_arg_conversion(self, arg, code):
+ # Generate conversion code for one argument.
+ old_type = arg.hdr_type
+ new_type = arg.type
+ if old_type.is_pyobject:
+ if arg.default:
+ code.putln("if (%s) {" % arg.hdr_cname)
+ else:
+ code.putln("assert(%s); {" % arg.hdr_cname)
+ self.generate_arg_conversion_from_pyobject(arg, code)
+ code.putln("}")
+ elif new_type.is_pyobject:
+ self.generate_arg_conversion_to_pyobject(arg, code)
+ else:
+ if new_type.assignable_from(old_type):
+ code.putln("%s = %s;" % (arg.entry.cname, arg.hdr_cname))
+ else:
+ error(arg.pos, "Cannot convert 1 argument from '%s' to '%s'" % (old_type, new_type))
+
+ def generate_arg_conversion_from_pyobject(self, arg, code):
+ new_type = arg.type
+ # copied from CoerceFromPyTypeNode
+ if new_type.from_py_function:
+ code.putln(new_type.from_py_call_code(
+ arg.hdr_cname,
+ arg.entry.cname,
+ arg.pos,
+ code,
+ ))
+ else:
+ error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
+
+ def generate_arg_conversion_to_pyobject(self, arg, code):
+ old_type = arg.hdr_type
+ func = old_type.to_py_function
+ if func:
+ code.putln("%s = %s(%s); %s" % (
+ arg.entry.cname,
+ func,
+ arg.hdr_cname,
+ code.error_goto_if_null(arg.entry.cname, arg.pos)))
+ code.put_var_gotref(arg.entry)
+ else:
+ error(arg.pos, "Cannot convert argument of type '%s' to Python object" % old_type)
+
+ def generate_argument_type_tests(self, code):
+ # Generate type tests for args whose signature
+ # type is PyObject * and whose declared type is
+ # a subtype thereof.
+ for arg in self.args:
+ if arg.needs_type_test:
+ self.generate_arg_type_test(arg, code)
+ elif not arg.accept_none and (arg.type.is_pyobject or
+ arg.type.is_buffer or
+ arg.type.is_memoryviewslice):
+ self.generate_arg_none_check(arg, code)
+
+ def error_value(self):
+ return self.signature.error_value
+
+
+class GeneratorDefNode(DefNode):
+ # Generator function node that creates a new generator instance when called.
+ #
+ # gbody GeneratorBodyDefNode the function implementing the generator
+ #
+
+ is_generator = True
+ is_coroutine = False
+ is_iterable_coroutine = False
+ is_asyncgen = False
+ gen_type_name = 'Generator'
+ needs_closure = True
+
+ child_attrs = DefNode.child_attrs + ["gbody"]
+
+ def __init__(self, pos, **kwargs):
+ # XXX: doesn't actually need a body
+ kwargs['body'] = StatListNode(pos, stats=[], is_terminator=True)
+ super(GeneratorDefNode, self).__init__(pos, **kwargs)
+
+ def analyse_declarations(self, env):
+ super(GeneratorDefNode, self).analyse_declarations(env)
+ self.gbody.local_scope = self.local_scope
+ self.gbody.analyse_declarations(env)
+
+ def generate_function_body(self, env, code):
+ body_cname = self.gbody.entry.func_cname
+ name = code.intern_identifier(self.name)
+ qualname = code.intern_identifier(self.qualname)
+ module_name = code.intern_identifier(self.module_name)
+
+ code.putln('{')
+ code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
+ '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
+ self.gen_type_name,
+ body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
+ Naming.cur_scope_cname, name, qualname, module_name,
+ code.error_goto_if_null('gen', self.pos)))
+ code.put_decref(Naming.cur_scope_cname, py_object_type)
+ if self.requires_classobj:
+ classobj_cname = 'gen->classobj'
+ code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
+ classobj_cname, Naming.self_cname))
+ code.put_incref(classobj_cname, py_object_type)
+ code.put_giveref(classobj_cname)
+ code.put_finish_refcount_context()
+ code.putln('return (PyObject *) gen;')
+ code.putln('}')
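+ # Illustrative note: this is why calling a generator function such as
+ # 'def g(): yield 1' executes none of its body - the call above only
+ # allocates the coroutine object around the closure and returns it;
+ # the body (gbody) first runs when iteration starts.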
+
+ def generate_function_definitions(self, env, code):
+ env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c"))
+ self.gbody.generate_function_header(code, proto=True)
+ super(GeneratorDefNode, self).generate_function_definitions(env, code)
+ self.gbody.generate_function_definitions(env, code)
+
+
+class AsyncDefNode(GeneratorDefNode):
+ gen_type_name = 'Coroutine'
+ is_coroutine = True
+
+
+class IterableAsyncDefNode(AsyncDefNode):
+ gen_type_name = 'IterableCoroutine'
+ is_iterable_coroutine = True
+
+
+class AsyncGenNode(AsyncDefNode):
+ gen_type_name = 'AsyncGen'
+ is_asyncgen = True
+
+
+class GeneratorBodyDefNode(DefNode):
+ # Main code body of a generator implemented as a DefNode.
+ #
+
+ is_generator_body = True
+ is_inlined = False
+ is_async_gen_body = False
+ inlined_comprehension_type = None # container type for inlined comprehensions
+
+ def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False):
+ super(GeneratorBodyDefNode, self).__init__(
+ pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body,
+ doc=None, args=[], star_arg=None, starstar_arg=None)
+
+ def declare_generator_body(self, env):
+ prefix = env.next_id(env.scope_prefix)
+ name = env.next_id('generator')
+ cname = Naming.genbody_prefix + prefix + name
+ entry = env.declare_var(None, py_object_type, self.pos,
+ cname=cname, visibility='private')
+ entry.func_cname = cname
+ entry.qualified_name = EncodedString(self.name)
+ # Work-around for https://github.com/cython/cython/issues/1699
+ # We don't currently determine whether the generator entry is used or not,
+ # so mark it as used to avoid false warnings.
+ entry.used = True
+ self.entry = entry
+
+ def analyse_declarations(self, env):
+ self.analyse_argument_types(env)
+ self.declare_generator_body(env)
+
+ def generate_function_header(self, code, proto=False):
+ header = "static PyObject *%s(PyObject *%s_obj, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % (
+ self.entry.func_cname,
+ Naming.generator_cname,
+ Naming.local_tstate_cname,
+ Naming.sent_value_cname)
+ if proto:
+ code.putln('%s; /* proto */' % header)
+ else:
+ code.putln('%s /* generator body */\n{' % header)
+
+ def generate_function_definitions(self, env, code):
+ lenv = self.local_scope
+
+ # Generate closure function definitions
+ self.body.generate_function_definitions(lenv, code)
+
+ # Generate C code for header and body of function
+ code.enter_cfunc_scope(lenv)
+ code.return_from_error_cleanup_label = code.new_label()
+
+ # ----- Top-level constants used by this function
+ code.mark_pos(self.pos)
+ self.generate_cached_builtins_decls(lenv, code)
+ # ----- Function header
+ code.putln("")
+ self.generate_function_header(code)
+ code.putln("__pyx_CoroutineObject *%s = (__pyx_CoroutineObject *)%s_obj;" % (Naming.generator_cname, Naming.generator_cname))
+ closure_init_code = code.insertion_point()
+ # ----- Local variables
+ code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
+ tempvardecl_code = code.insertion_point()
+ code.put_declare_refcount_context()
+ code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ tempvardecl_code.put_trace_declarations()
+ code.funcstate.can_trace = True
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
+
+ # ----- Resume switch point.
+ code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
+ resume_code = code.insertion_point()
+ first_run_label = code.new_label('first_run')
+ code.use_label(first_run_label)
+ code.put_label(first_run_label)
+ code.putln('%s' %
+ (code.error_goto_if_null(Naming.sent_value_cname, self.pos)))
+
+ # ----- prepare target container for inlined comprehension
+ if self.is_inlined and self.inlined_comprehension_type is not None:
+ target_type = self.inlined_comprehension_type
+ if target_type is Builtin.list_type:
+ comp_init = 'PyList_New(0)'
+ elif target_type is Builtin.set_type:
+ comp_init = 'PySet_New(NULL)'
+ elif target_type is Builtin.dict_type:
+ comp_init = 'PyDict_New()'
+ else:
+ raise InternalError(
+ "invalid type of inlined comprehension: %s" % target_type)
+ code.putln("%s = %s; %s" % (
+ Naming.retval_cname, comp_init,
+ code.error_goto_if_null(Naming.retval_cname, self.pos)))
+ code.put_gotref(Naming.retval_cname)
+
+ # ----- Function body
+ self.generate_function_body(env, code)
+ # ----- Closure initialization
+ if lenv.scope_class.type.scope.var_entries:
+ closure_init_code.putln('%s = %s;' % (
+ lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
+ lenv.scope_class.type.cast_code('%s->closure' %
+ Naming.generator_cname)))
+ # FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
+ code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
+
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+
+ code.mark_pos(self.pos)
+ code.putln("")
+ code.putln("/* function exit code */")
+
+ # on normal generator termination, we do not take the exception propagation
+ # path: no traceback info is required and not creating it is much faster
+ if not self.is_inlined and not self.body.is_terminator:
+ if self.is_async_gen_body:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.putln('PyErr_SetNone(%s);' % (
+ '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
+ # ----- Error cleanup
+ if code.label_used(code.error_label):
+ if not self.body.is_terminator:
+ code.put_goto(code.return_label)
+ code.put_label(code.error_label)
+ if self.is_inlined and self.inlined_comprehension_type is not None:
+ code.put_xdecref_clear(Naming.retval_cname, py_object_type)
+ if Future.generator_stop in env.global_scope().context.future_directives:
+ # PEP 479: turn accidental StopIteration exceptions into a RuntimeError
+ code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
+ code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body))
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.put_add_traceback(self.entry.qualified_name)
+
+ # ----- Non-error return cleanup
+ code.put_label(code.return_label)
+ if self.is_inlined:
+ code.put_xgiveref(Naming.retval_cname)
+ else:
+ code.put_xdecref_clear(Naming.retval_cname, py_object_type)
+ # For Py3.7, clearing is already done below.
+ code.putln("#if !CYTHON_USE_EXC_INFO_STACK")
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+ code.putln("#endif")
+ code.putln('%s->resume_label = -1;' % Naming.generator_cname)
+ # clean up as early as possible to help breaking any reference cycles
+ code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
+ code.put_finish_refcount_context()
+ code.putln("return %s;" % Naming.retval_cname)
+ code.putln("}")
+
+ # ----- Go back and insert temp variable declarations
+ tempvardecl_code.put_temp_declarations(code.funcstate)
+ # ----- Generator resume code
+ if profile or linetrace:
+ resume_code.put_trace_call(self.entry.qualified_name, self.pos,
+ nogil=not code.funcstate.gil_owned)
+ resume_code.putln("switch (%s->resume_label) {" % (
+ Naming.generator_cname))
+
+ resume_code.putln("case 0: goto %s;" % first_run_label)
+
+ for i, label in code.yield_labels:
+ resume_code.putln("case %d: goto %s;" % (i, label))
+ resume_code.putln("default: /* CPython raises the right error here */")
+ if profile or linetrace:
+ resume_code.put_trace_return("Py_None",
+ nogil=not code.funcstate.gil_owned)
+ resume_code.put_finish_refcount_context()
+ resume_code.putln("return NULL;")
+ resume_code.putln("}")
+
+ code.exit_cfunc_scope()
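+
+ # For orientation: a minimal sketch (assumed shape, not verbatim output) of
+ # the C resume switch emitted by the code above. Label and variable names
+ # follow the Naming conventions but are illustrative here:
+ #
+ #     switch (__pyx_generator->resume_label) {
+ #         case 0: goto __pyx_L_first_run;
+ #         case 1: goto __pyx_L_resume_1;   /* one case per yield */
+ #         default: /* CPython raises the right error here */
+ #             return NULL;
+ #     }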
+
+
+class OverrideCheckNode(StatNode):
+ # A Node for dispatching to the def method if it
+ # is overridden.
+ #
+ # py_func
+ #
+ # args
+ # func_temp
+ # body
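+ #
+ # Illustrative trigger (assumed typical usage, not from this diff): a
+ # cpdef method must dispatch to a Python-level override when one exists:
+ #
+ #     cdef class Base:
+ #         cpdef int f(self):
+ #             return 1
+ #
+ #     class Sub(Base):
+ #         def f(self):      # overrides the cpdef method from Python
+ #             return 2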
+
+ child_attrs = ['body']
+
+ body = None
+
+ def analyse_expressions(self, env):
+ self.args = env.arg_entries
+ if self.py_func.is_module_scope:
+ first_arg = 0
+ else:
+ first_arg = 1
+ from . import ExprNodes
+ self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
+ call_node = ExprNodes.SimpleCallNode(
+ self.pos, function=self.func_node,
+ args=[ExprNodes.NameNode(self.pos, name=arg.name)
+ for arg in self.args[first_arg:]])
+ if env.return_type.is_void or env.return_type.is_returncode:
+ self.body = StatListNode(self.pos, stats=[
+ ExprStatNode(self.pos, expr=call_node),
+ ReturnStatNode(self.pos, value=None)])
+ else:
+ self.body = ReturnStatNode(self.pos, value=call_node)
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code):
+ interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
+ # Check to see if we are an extension type
+ if self.py_func.is_module_scope:
+ self_arg = "((PyObject *)%s)" % Naming.module_cname
+ else:
+ self_arg = "((PyObject *)%s)" % self.args[0].cname
+ code.putln("/* Check if called by wrapper */")
+ code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
+ code.putln("/* Check if overridden in Python */")
+ if self.py_func.is_module_scope:
+ code.putln("else {")
+ else:
+ code.putln("else if (unlikely((Py_TYPE(%s)->tp_dictoffset != 0)"
+ " || (Py_TYPE(%s)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) {" % (
+ self_arg, self_arg))
+
+ code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyDictVersioning", "ObjectHandling.c"))
+ # TODO: remove the object dict version check by 'inlining' the getattr implementation for methods.
+ # This would allow checking the dict versions around _PyType_Lookup() if it returns a descriptor,
+ # and would (tada!) make this check a pure type based thing instead of supporting only a single
+ # instance at a time.
+ code.putln("static PY_UINT64_T %s = __PYX_DICT_VERSION_INIT, %s = __PYX_DICT_VERSION_INIT;" % (
+ Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
+ code.putln("if (unlikely(!__Pyx_object_dict_version_matches(%s, %s, %s))) {" % (
+ self_arg, Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
+ code.putln("PY_UINT64_T %s = __Pyx_get_tp_dict_version(%s);" % (
+ Naming.type_dict_guard_temp, self_arg))
+ code.putln("#endif")
+
+ func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ self.func_node.set_cname(func_node_temp)
+ # need to get attribute manually--scope would return cdef method
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
+ err = code.error_goto_if_null(func_node_temp, self.pos)
+ code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
+ func_node_temp, self_arg, interned_attr_cname, err))
+ code.put_gotref(func_node_temp)
+
+ is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
+ is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (PyCFunction)(void*)%s)" % (
+ func_node_temp, self.py_func.entry.func_cname)
+ code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
+ self.body.generate_execution_code(code)
+ code.putln("}")
+
+ # NOTE: it's not 100% certain that we catch the exact versions here that were used for the lookup,
+ # but it is very unlikely that the versions change during the lookup, and the type dict safeguard
+ # should increase the chance of detecting such a case.
+ code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
+ code.putln("%s = __Pyx_get_tp_dict_version(%s);" % (
+ Naming.tp_dict_version_temp, self_arg))
+ code.putln("%s = __Pyx_get_object_dict_version(%s);" % (
+ Naming.obj_dict_version_temp, self_arg))
+ # Safety check that the type dict didn't change during the lookup. Since CPython looks up the
+ # attribute (descriptor) first in the type dict and then in the instance dict or through the
+ # descriptor, the only really far-away lookup when we get here is one in the type dict. So we
+ # double check the type dict version before and afterwards to guard against later changes of
+ # the type dict during the lookup process.
+ code.putln("if (unlikely(%s != %s)) {" % (
+ Naming.type_dict_guard_temp, Naming.tp_dict_version_temp))
+ code.putln("%s = %s = __PYX_DICT_VERSION_INIT;" % (
+ Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
+ code.putln("}")
+ code.putln("#endif")
+
+ code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
+ code.funcstate.release_temp(func_node_temp)
+
+ code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
+ code.putln("}")
+ code.putln("#endif")
+
+ code.putln("}")
+
+
+class ClassDefNode(StatNode, BlockNode):
+ pass
+
+
+class PyClassDefNode(ClassDefNode):
+ # A Python class definition.
+ #
+ # name EncodedString Name of the class
+ # doc string or None
+ # body StatNode Attribute definition code
+ # entry Symtab.Entry
+ # scope PyClassScope
+ # decorators [DecoratorNode] list of decorators or None
+ #
+ # The following subnodes are constructed internally:
+ #
+ # dict DictNode Class dictionary or Py3 namespace
+ # classobj ClassNode Class object
+ # target NameNode Variable to assign class object to
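+ #
+ # Example of a Py3-style class statement that exercises the metaclass
+ # handling below (illustrative):
+ #
+ #     class C(Base, metaclass=Meta, flag=True):
+ #         ...
+ #
+ # A literal 'metaclass' keyword is extracted eagerly in __init__, so the
+ # runtime "build kwargs, find metaclass" dance can be skipped.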
+
+ child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result",
+ "target", "class_cell", "decorators"]
+ decorators = None
+ class_result = None
+ is_py3_style_class = False # Python3 style class (kwargs)
+ metaclass = None
+ mkw = None
+
+ def __init__(self, pos, name, bases, doc, body, decorators=None,
+ keyword_args=None, force_py3_semantics=False):
+ StatNode.__init__(self, pos)
+ self.name = name
+ self.doc = doc
+ self.body = body
+ self.decorators = decorators
+ self.bases = bases
+ from . import ExprNodes
+ if self.doc and Options.docstrings:
+ doc = embed_position(self.pos, self.doc)
+ doc_node = ExprNodes.StringNode(pos, value=doc)
+ else:
+ doc_node = None
+
+ allow_py2_metaclass = not force_py3_semantics
+ if keyword_args:
+ allow_py2_metaclass = False
+ self.is_py3_style_class = True
+ if keyword_args.is_dict_literal:
+ if keyword_args.key_value_pairs:
+ for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
+ if item.key.value == 'metaclass':
+ if self.metaclass is not None:
+ error(item.pos, "keyword argument 'metaclass' passed multiple times")
+ # special case: we already know the metaclass,
+ # so we don't need to do the "build kwargs,
+ # find metaclass" dance at runtime
+ self.metaclass = item.value
+ del keyword_args.key_value_pairs[i]
+ self.mkw = keyword_args
+ else:
+ assert self.metaclass is not None
+ else:
+ # MergedDictNode
+ self.mkw = ExprNodes.ProxyNode(keyword_args)
+
+ if force_py3_semantics or self.bases or self.mkw or self.metaclass:
+ if self.metaclass is None:
+ if keyword_args and not keyword_args.is_dict_literal:
+ # **kwargs may contain 'metaclass' arg
+ mkdict = self.mkw
+ else:
+ mkdict = None
+ if (not mkdict and
+ self.bases.is_sequence_constructor and
+ not self.bases.args):
+ pass # no base classes => no inherited metaclass
+ else:
+ self.metaclass = ExprNodes.PyClassMetaclassNode(
+ pos, class_def_node=self)
+ needs_metaclass_calculation = False
+ else:
+ needs_metaclass_calculation = True
+
+ self.dict = ExprNodes.PyClassNamespaceNode(
+ pos, name=name, doc=doc_node, class_def_node=self)
+ self.classobj = ExprNodes.Py3ClassNode(
+ pos, name=name, class_def_node=self, doc=doc_node,
+ calculate_metaclass=needs_metaclass_calculation,
+ allow_py2_metaclass=allow_py2_metaclass)
+ else:
+ # no bases, no metaclass => old style class creation
+ self.dict = ExprNodes.DictNode(pos, key_value_pairs=[])
+ self.classobj = ExprNodes.ClassNode(
+ pos, name=name, class_def_node=self, doc=doc_node)
+
+ self.target = ExprNodes.NameNode(pos, name=name)
+ self.class_cell = ExprNodes.ClassCellInjectorNode(self.pos)
+
+ def as_cclass(self):
+ """
+ Return this node as if it were declared as an extension class
+ """
+ if self.is_py3_style_class:
+ error(self.classobj.pos, "Python3-style class cannot be represented as a C class")
+ return
+
+ from . import ExprNodes
+ return CClassDefNode(self.pos,
+ visibility='private',
+ module_name=None,
+ class_name=self.name,
+ bases=self.bases or ExprNodes.TupleNode(self.pos, args=[]),
+ decorators=self.decorators,
+ body=self.body,
+ in_pxd=False,
+ doc=self.doc)
+
+ def create_scope(self, env):
+ genv = env
+ while genv.is_py_class_scope or genv.is_c_class_scope:
+ genv = genv.outer_scope
+ cenv = self.scope = PyClassScope(name=self.name, outer_scope=genv)
+ return cenv
+
+ def analyse_declarations(self, env):
+ class_result = self.classobj
+ if self.decorators:
+ from .ExprNodes import SimpleCallNode
+ for decorator in self.decorators[::-1]:
+ class_result = SimpleCallNode(
+ decorator.pos,
+ function=decorator.decorator,
+ args=[class_result])
+ self.decorators = None
+ self.class_result = class_result
+ if self.bases:
+ self.bases.analyse_declarations(env)
+ if self.mkw:
+ self.mkw.analyse_declarations(env)
+ self.class_result.analyse_declarations(env)
+ self.target.analyse_target_declaration(env)
+ cenv = self.create_scope(env)
+ cenv.directives = env.directives
+ cenv.class_obj_cname = self.target.entry.cname
+ self.body.analyse_declarations(cenv)
+
+ def analyse_expressions(self, env):
+ if self.bases:
+ self.bases = self.bases.analyse_expressions(env)
+ if self.mkw:
+ self.mkw = self.mkw.analyse_expressions(env)
+ if self.metaclass:
+ self.metaclass = self.metaclass.analyse_expressions(env)
+ self.dict = self.dict.analyse_expressions(env)
+ self.class_result = self.class_result.analyse_expressions(env)
+ cenv = self.scope
+ self.body = self.body.analyse_expressions(cenv)
+ self.target.analyse_target_expression(env, self.classobj)
+ self.class_cell = self.class_cell.analyse_expressions(cenv)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.generate_lambda_definitions(self.scope, code)
+ self.body.generate_function_definitions(self.scope, code)
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ code.pyclass_stack.append(self)
+ cenv = self.scope
+ if self.bases:
+ self.bases.generate_evaluation_code(code)
+ if self.mkw:
+ self.mkw.generate_evaluation_code(code)
+ if self.metaclass:
+ self.metaclass.generate_evaluation_code(code)
+ self.dict.generate_evaluation_code(code)
+ cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
+
+ class_cell = self.class_cell
+ if class_cell is not None and not class_cell.is_active:
+ class_cell = None
+
+ if class_cell is not None:
+ class_cell.generate_evaluation_code(code)
+ self.body.generate_execution_code(code)
+ self.class_result.generate_evaluation_code(code)
+ if class_cell is not None:
+ class_cell.generate_injection_code(
+ code, self.class_result.result())
+ if class_cell is not None:
+ class_cell.generate_disposal_code(code)
+ class_cell.free_temps(code)
+
+ cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
+ self.target.generate_assignment_code(self.class_result, code)
+ self.dict.generate_disposal_code(code)
+ self.dict.free_temps(code)
+ if self.metaclass:
+ self.metaclass.generate_disposal_code(code)
+ self.metaclass.free_temps(code)
+ if self.mkw:
+ self.mkw.generate_disposal_code(code)
+ self.mkw.free_temps(code)
+ if self.bases:
+ self.bases.generate_disposal_code(code)
+ self.bases.free_temps(code)
+ code.pyclass_stack.pop()
+
+
+class CClassDefNode(ClassDefNode):
+ # An extension type definition.
+ #
+ # visibility 'private' or 'public' or 'extern'
+ # typedef_flag boolean
+ # api boolean
+ # module_name string or None For import of extern type objects
+ # class_name string Unqualified name of class
+ # as_name string or None Name to declare as in this scope
+ # bases TupleNode Base class(es)
+ # objstruct_name string or None Specified C name of object struct
+ # typeobj_name string or None Specified C name of type object
+ # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
+ # in_pxd boolean Is in a .pxd file
+ # decorators [DecoratorNode] list of decorators or None
+ # doc string or None
+ # body StatNode or None
+ # entry Symtab.Entry
+ # base_type PyExtensionType or None
+ # buffer_defaults_node DictNode or None Declares defaults for a buffer
+ # buffer_defaults_pos
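+ #
+ # Illustrative declaration handled by this node (assumed typical usage):
+ #
+ #     cdef class Rectangle(Shape):
+ #         cdef int width, height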
+
+ child_attrs = ["body"]
+ buffer_defaults_node = None
+ buffer_defaults_pos = None
+ typedef_flag = False
+ api = False
+ objstruct_name = None
+ typeobj_name = None
+ check_size = None
+ decorators = None
+ shadow = False
+
+ def buffer_defaults(self, env):
+ if not hasattr(self, '_buffer_defaults'):
+ from . import Buffer
+ if self.buffer_defaults_node:
+ self._buffer_defaults = Buffer.analyse_buffer_options(
+ self.buffer_defaults_pos,
+ env, [], self.buffer_defaults_node,
+ need_complete=False)
+ else:
+ self._buffer_defaults = None
+ return self._buffer_defaults
+
+ def declare(self, env):
+ if self.module_name and self.visibility != 'extern':
+ module_path = self.module_name.split(".")
+ home_scope = env.find_imported_module(module_path, self.pos)
+ if not home_scope:
+ return None
+ else:
+ home_scope = env
+
+ self.entry = home_scope.declare_c_class(
+ name=self.class_name,
+ pos=self.pos,
+ defining=0,
+ implementing=0,
+ module_name=self.module_name,
+ base_type=None,
+ objstruct_cname=self.objstruct_name,
+ typeobj_cname=self.typeobj_name,
+ visibility=self.visibility,
+ typedef_flag=self.typedef_flag,
+ check_size=self.check_size,
+ api=self.api,
+ buffer_defaults=self.buffer_defaults(env),
+ shadow=self.shadow)
+
+ def analyse_declarations(self, env):
+ #print "CClassDefNode.analyse_declarations:", self.class_name
+ #print "...visibility =", self.visibility
+ #print "...module_name =", self.module_name
+
+ if env.in_cinclude and not self.objstruct_name:
+ error(self.pos, "Object struct name specification required for C class defined in 'extern from' block")
+ if self.decorators:
+ error(self.pos, "Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
+ self.base_type = None
+ # Now that module imports are cached, we need to
+ # import the modules for extern classes.
+ if self.module_name:
+ self.module = None
+ for module in env.cimported_modules:
+ if module.name == self.module_name:
+ self.module = module
+ if self.module is None:
+ self.module = ModuleScope(self.module_name, None, env.context)
+ self.module.has_extern_class = 1
+ env.add_imported_module(self.module)
+
+ if self.bases.args:
+ base = self.bases.args[0]
+ base_type = base.analyse_as_type(env)
+ if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
+ # Use the Python rather than C variant of these types.
+ base_type = env.lookup(base_type.sign_and_name()).type
+ if base_type is None:
+ error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
+ elif base_type == PyrexTypes.py_object_type:
+ base_class_scope = None
+ elif not base_type.is_extension_type and \
+ not (base_type.is_builtin_type and base_type.objstruct_cname):
+ error(base.pos, "'%s' is not an extension type" % base_type)
+ elif not base_type.is_complete():
+ error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
+ base_type.name, self.class_name))
+ elif base_type.scope and base_type.scope.directives and \
+ base_type.is_final_type:
+ error(base.pos, "Base class '%s' of type '%s' is final" % (
+ base_type, self.class_name))
+ elif base_type.is_builtin_type and \
+ base_type.name in ('tuple', 'str', 'bytes'):
+ error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
+ % base_type.name)
+ else:
+ self.base_type = base_type
+ if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
+ warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
+
+ has_body = self.body is not None
+ if has_body and self.base_type and not self.base_type.scope:
+ # To properly initialize inherited attributes, the base type must
+ # be analysed before this type.
+ self.base_type.defered_declarations.append(lambda: self.analyse_declarations(env))
+ return
+
+ if self.module_name and self.visibility != 'extern':
+ module_path = self.module_name.split(".")
+ home_scope = env.find_imported_module(module_path, self.pos)
+ if not home_scope:
+ return
+ else:
+ home_scope = env
+
+ if self.visibility == 'extern':
+ if (self.module_name == '__builtin__' and
+ self.class_name in Builtin.builtin_types and
+ env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
+ warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
+
+ self.entry = home_scope.declare_c_class(
+ name=self.class_name,
+ pos=self.pos,
+ defining=has_body and self.in_pxd,
+ implementing=has_body and not self.in_pxd,
+ module_name=self.module_name,
+ base_type=self.base_type,
+ objstruct_cname=self.objstruct_name,
+ typeobj_cname=self.typeobj_name,
+ check_size=self.check_size,
+ visibility=self.visibility,
+ typedef_flag=self.typedef_flag,
+ api=self.api,
+ buffer_defaults=self.buffer_defaults(env),
+ shadow=self.shadow)
+
+ if self.shadow:
+ home_scope.lookup(self.class_name).as_variable = self.entry
+ if home_scope is not env and self.visibility == 'extern':
+ env.add_imported_entry(self.class_name, self.entry, self.pos)
+ self.scope = scope = self.entry.type.scope
+ if scope is not None:
+ scope.directives = env.directives
+
+ if self.doc and Options.docstrings:
+ scope.doc = embed_position(self.pos, self.doc)
+
+ if has_body:
+ self.body.analyse_declarations(scope)
+ dict_entry = self.scope.lookup_here("__dict__")
+ if dict_entry and dict_entry.is_variable and (not scope.defined and not scope.implemented):
+ dict_entry.getter_cname = self.scope.mangle_internal("__dict__getter")
+ self.scope.declare_property("__dict__", dict_entry.doc, dict_entry.pos)
+ if self.in_pxd:
+ scope.defined = 1
+ else:
+ scope.implemented = 1
+
+ if len(self.bases.args) > 1:
+ if not has_body or self.in_pxd:
+ error(self.bases.args[1].pos, "Only declare first base in declaration.")
+ # At runtime, we check that the other bases are heap types
+ # and that a __dict__ is added if required.
+ for other_base in self.bases.args[1:]:
+ if other_base.analyse_as_type(env):
+ error(other_base.pos, "Only one extension type base class allowed.")
+ self.entry.type.early_init = 0
+ from . import ExprNodes
+ self.type_init_args = ExprNodes.TupleNode(
+ self.pos,
+ args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
+ self.bases,
+ ExprNodes.DictNode(self.pos, key_value_pairs=[])])
+ elif self.base_type:
+ self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
+ self.type_init_args = None
+ else:
+ self.entry.type.early_init = 1
+ self.type_init_args = None
+
+ env.allocate_vtable_names(self.entry)
+
+ for thunk in self.entry.type.defered_declarations:
+ thunk()
+
+ def analyse_expressions(self, env):
+ if self.body:
+ scope = self.entry.type.scope
+ self.body = self.body.analyse_expressions(scope)
+ if self.type_init_args:
+ self.type_init_args.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ if self.body:
+ self.generate_lambda_definitions(self.scope, code)
+ self.body.generate_function_definitions(self.scope, code)
+
+ def generate_execution_code(self, code):
+ # This is needed to generate evaluation code for
+ # default values of method arguments.
+ code.mark_pos(self.pos)
+ if self.body:
+ self.body.generate_execution_code(code)
+ if not self.entry.type.early_init:
+ if self.type_init_args:
+ self.type_init_args.generate_evaluation_code(code)
+ bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
+ # Let Python do the base types compatibility checking.
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
+ code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type, self.type_init_args.result()))
+ code.putln(code.error_goto_if_null(trial_type, self.pos))
+ code.put_gotref(trial_type)
+ code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ trial_type, first_base))
+ code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
+ code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
+ trial_type, first_base))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(trial_type)
+ code.put_incref(bases, PyrexTypes.py_object_type)
+ code.put_giveref(bases)
+ code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+ code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ self.type_init_args.generate_disposal_code(code)
+ self.type_init_args.free_temps(code)
+
+ self.generate_type_ready_code(self.entry, code, True)
+
+ # Also called from ModuleNode for early init types.
+ @staticmethod
+ def generate_type_ready_code(entry, code, heap_type_bases=False):
+ # Generate a call to PyType_Ready for an extension
+ # type defined in this module.
+ type = entry.type
+ typeobj_cname = type.typeobj_cname
+ scope = type.scope
+ if not scope: # could be None if there was an error
+ return
+ if entry.visibility != 'extern':
+ for slot in TypeSlots.slot_table:
+ slot.generate_dynamic_init_code(scope, code)
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ readyfunc = "__Pyx_PyType_Ready"
+ else:
+ readyfunc = "PyType_Ready"
+ code.putln(
+ "if (%s(&%s) < 0) %s" % (
+ readyfunc,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Don't inherit tp_print from builtin types, restoring the
+ # behavior of using tp_repr or tp_str instead.
+ # ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1)
+ code.putln("#if PY_VERSION_HEX < 0x030800B1")
+ code.putln("%s.tp_print = 0;" % typeobj_cname)
+ code.putln("#endif")
+
+ # Use specialised attribute lookup for types with generic lookup but no instance dict.
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
+ dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
+ if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ if type.is_final_type:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
+ utility_func = "PyObject_GenericGetAttrNoDict"
+ else:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ utility_func = "PyObject_GenericGetAttr"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
+
+ code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
+ " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeobj_cname, typeobj_cname))
+ code.putln("%s.tp_getattro = %s;" % (
+ typeobj_cname, py_cfunc))
+ code.putln("}")
+
+ # Fix special method docstrings. This is a bit of a hack, but
+ # unless we let PyType_Ready create the slot wrappers we have
+ # a significant performance hit. (See trac #561.)
+ for func in entry.type.scope.pyfunc_entries:
+ is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
+ if (func.is_special and Options.docstrings and
+ func.wrapperbase_cname and not is_buffer):
+ slot = TypeSlots.method_name_to_slot.get(func.name)
+ preprocessor_guard = slot.preprocessor_guard_code() if slot else None
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+ code.putln('#if CYTHON_UPDATE_DESCRIPTOR_DOC')
+ code.putln("{")
+ code.putln(
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
+ typeobj_cname,
+ func.name,
+ code.error_goto_if_null('wrapper', entry.pos)))
+ code.putln(
+ "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ code.putln(
+ "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
+ func.wrapperbase_cname))
+ code.putln(
+ "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
+ code.putln(
+ "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
+ func.wrapperbase_cname))
+ code.putln("}")
+ code.putln("}")
+ code.putln('#endif')
+ if preprocessor_guard:
+ code.putln('#endif')
+ if type.vtable_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
+ code.putln(
+ "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
+ typeobj_cname,
+ type.vtabptr_cname,
+ code.error_goto(entry.pos)))
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ if not type.scope.is_internal and not type.scope.directives.get('internal'):
+ # scope.is_internal is set for types defined by
+ # Cython (such as closures), the 'internal'
+ # directive is set by users
+ code.putln(
+ 'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % (
+ Naming.module_cname,
+ code.intern_identifier(scope.class_name),
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_entry:
+ if weakref_entry.type is py_object_type:
+ tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
+ tp_weaklistoffset,
+ tp_weaklistoffset,
+ objstruct,
+ weakref_entry.cname))
+ else:
+ error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+ if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
+ # Unfortunately, we cannot reliably detect whether a
+ # superclass defined __reduce__ at compile time, so we must
+ # do so at runtime.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
+ code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Generate code to initialise the typeptr of an extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.typeptr_cname, type.typeobj_cname))
+
+ def annotate(self, code):
+ if self.type_init_args:
+ self.type_init_args.annotate(code)
+ if self.body:
+ self.body.annotate(code)
+
+
+class PropertyNode(StatNode):
+ # Definition of a property in an extension type.
+ #
+ # name string
+ # doc EncodedString or None Doc string
+ # entry Symtab.Entry
+ # body StatListNode
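+ #
+ # Illustrative usage (legacy property syntax in an extension type):
+ #
+ #     cdef class C:
+ #         property x:
+ #             "x's docstring"
+ #             def __get__(self):
+ #                 return self._x
+ #             def __set__(self, value):
+ #                 self._x = value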
+
+ child_attrs = ["body"]
+
+ def analyse_declarations(self, env):
+ self.entry = env.declare_property(self.name, self.doc, self.pos)
+ self.entry.scope.directives = env.directives
+ self.body.analyse_declarations(self.entry.scope)
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ pass
+
+ def annotate(self, code):
+ self.body.annotate(code)
+
+
+class GlobalNode(StatNode):
+ # Global variable declaration.
+ #
+ # names [string]
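+ #
+ # e.g. "global counter, total" inside a function makes assignments to
+ # those names target the module scope.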
+
+ child_attrs = []
+
+ def analyse_declarations(self, env):
+ for name in self.names:
+ env.declare_global(name, self.pos)
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class NonlocalNode(StatNode):
+ # Nonlocal variable declaration via the 'nonlocal' keyword.
+ #
+ # names [string]
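+ #
+ # e.g. "nonlocal acc" inside a nested function rebinds 'acc' in the
+ # closest enclosing function scope.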
+
+ child_attrs = []
+
+ def analyse_declarations(self, env):
+ for name in self.names:
+ env.declare_nonlocal(name, self.pos)
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class ExprStatNode(StatNode):
+ # Expression used as a statement.
+ #
+ # expr ExprNode
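+ #
+ # Besides plain expression statements, this also intercepts declaration
+ # pseudo-calls and bare annotations, e.g. (illustrative):
+ #
+ #     cython.declare(n=cython.int, x=cython.double)
+ #     name: some_type      # annotation without assignment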
+
+ child_attrs = ["expr"]
+
+ def analyse_declarations(self, env):
+ from . import ExprNodes
+ expr = self.expr
+ if isinstance(expr, ExprNodes.GeneralCallNode):
+ func = expr.function.as_cython_attribute()
+ if func == u'declare':
+ args, kwds = expr.explicit_args_kwds()
+ if len(args):
+ error(expr.pos, "Variable names must be specified.")
+ for var, type_node in kwds.key_value_pairs:
+ type = type_node.analyse_as_type(env)
+ if type is None:
+ error(type_node.pos, "Unknown type")
+ else:
+ env.declare_var(var.value, type, var.pos, is_cdef=True)
+ self.__class__ = PassStatNode
+ elif getattr(expr, 'annotation', None) is not None:
+ if expr.is_name:
+ # non-code variable annotation, e.g. "name: type"
+ expr.declare_from_annotation(env)
+ self.__class__ = PassStatNode
+ elif expr.is_attribute or expr.is_subscript:
+ # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
+ self.__class__ = PassStatNode
+
+ def analyse_expressions(self, env):
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
+ self.expr = self.expr.analyse_expressions(env)
+ # Repeat in case of node replacement.
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
+ return self
+
+ def nogil_check(self, env):
+ if self.expr.type.is_pyobject and self.expr.is_temp:
+ self.gil_error()
+
+ gil_message = "Discarding owned Python object"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
+ self.expr.generate_evaluation_code(code)
+ if not self.expr.is_temp and self.expr.result():
+ result = self.expr.result()
+ if not self.expr.type.is_void:
+ result = "(void)(%s)" % result
+ code.putln("%s;" % result)
+ self.expr.generate_disposal_code(code)
+ self.expr.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.expr.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.expr.annotate(code)
+
+
+class AssignmentNode(StatNode):
+ # Abstract base class for assignment nodes.
+ #
+ # The analyse_expressions and generate_execution_code
+ # phases of assignments are split into two sub-phases
+ # each, to enable all the right hand sides of a
+ # parallel assignment to be evaluated before assigning
+ # to any of the left hand sides.
+
+ def analyse_expressions(self, env):
+ node = self.analyse_types(env)
+ if isinstance(node, AssignmentNode) and not isinstance(node, ParallelAssignmentNode):
+ if node.rhs.type.is_ptr and node.rhs.is_ephemeral():
+ error(self.pos, "Storing unsafe C derivative of temporary Python reference")
+ return node
+
+# def analyse_expressions(self, env):
+# self.analyse_expressions_1(env)
+# self.analyse_expressions_2(env)
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ self.generate_rhs_evaluation_code(code)
+ self.generate_assignment_code(code)
+
+
+class SingleAssignmentNode(AssignmentNode):
+ # The simplest case:
+ #
+ # a = b
+ #
+ # lhs ExprNode Left hand side
+ # rhs ExprNode Right hand side
+ # first bool Is this guaranteed the first assignment to lhs?
+ # is_overloaded_assignment bool Is this assignment done via an overloaded operator=
+ # exception_check
+ # exception_value
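+ #
+ # Declaration-style right-hand sides are intercepted in
+ # analyse_declarations, e.g. (illustrative pure-Python-mode forms):
+ #
+ #     x = cython.declare(cython.int, 5)
+ #     MyInt = cython.typedef(cython.int)
+ #     Point = cython.struct(x=cython.double, y=cython.double)
+ #     floating = cython.fused_type(cython.float, cython.double)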
+
+ child_attrs = ["lhs", "rhs"]
+ first = False
+ is_overloaded_assignment = False
+ declaration_only = False
+
+ def analyse_declarations(self, env):
+ from . import ExprNodes
+
+ # handle declarations of the form x = cython.foo()
+ if isinstance(self.rhs, ExprNodes.CallNode):
+ func_name = self.rhs.function.as_cython_attribute()
+ if func_name:
+ args, kwds = self.rhs.explicit_args_kwds()
+ if func_name in ['declare', 'typedef']:
+ if len(args) > 2:
+ error(args[2].pos, "Invalid positional argument.")
+ return
+ if kwds is not None:
+ kwdict = kwds.compile_time_value(None)
+ if func_name == 'typedef' or 'visibility' not in kwdict:
+ error(kwds.pos, "Invalid keyword argument.")
+ return
+ visibility = kwdict['visibility']
+ else:
+ visibility = 'private'
+ type = args[0].analyse_as_type(env)
+ if type is None:
+ error(args[0].pos, "Unknown type")
+ return
+ lhs = self.lhs
+ if func_name == 'declare':
+ if isinstance(lhs, ExprNodes.NameNode):
+ vars = [(lhs.name, lhs.pos)]
+ elif isinstance(lhs, ExprNodes.TupleNode):
+ vars = [(var.name, var.pos) for var in lhs.args]
+ else:
+ error(lhs.pos, "Invalid declaration")
+ return
+ for var, pos in vars:
+ env.declare_var(var, type, pos, is_cdef=True, visibility=visibility)
+ if len(args) == 2:
+ # we have a value
+ self.rhs = args[1]
+ else:
+ self.declaration_only = True
+ else:
+ self.declaration_only = True
+ if not isinstance(lhs, ExprNodes.NameNode):
+ error(lhs.pos, "Invalid declaration.")
+ env.declare_typedef(lhs.name, type, self.pos, visibility='private')
+
+ elif func_name in ['struct', 'union']:
+ self.declaration_only = True
+ if len(args) > 0 or kwds is None:
+ error(self.rhs.pos, "Struct or union members must be given by name.")
+ return
+ members = []
+ for member, type_node in kwds.key_value_pairs:
+ type = type_node.analyse_as_type(env)
+ if type is None:
+ error(type_node.pos, "Unknown type")
+ else:
+ members.append((member.value, type, member.pos))
+ if len(members) < len(kwds.key_value_pairs):
+ return
+ if not isinstance(self.lhs, ExprNodes.NameNode):
+ error(self.lhs.pos, "Invalid declaration.")
+ name = self.lhs.name
+ scope = StructOrUnionScope(name)
+ env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
+ for member, type, pos in members:
+ scope.declare_var(member, type, pos)
+
+ elif func_name == 'fused_type':
+ # dtype = cython.fused_type(...)
+ self.declaration_only = True
+ if kwds:
+ error(self.rhs.function.pos,
+ "fused_type does not take keyword arguments")
+
+ fusednode = FusedTypeNode(self.rhs.pos,
+ name=self.lhs.name, types=args)
+ fusednode.analyse_declarations(env)
+
+ if self.declaration_only:
+ return
+ else:
+ self.lhs.analyse_target_declaration(env)
+
+ def analyse_types(self, env, use_temp=0):
+ from . import ExprNodes
+
+ self.rhs = self.rhs.analyse_types(env)
+
+ unrolled_assignment = self.unroll_rhs(env)
+ if unrolled_assignment:
+ return unrolled_assignment
+
+ self.lhs = self.lhs.analyse_target_types(env)
+ self.lhs.gil_assignment_check(env)
+ unrolled_assignment = self.unroll_lhs(env)
+ if unrolled_assignment:
+ return unrolled_assignment
+
+ if isinstance(self.lhs, ExprNodes.MemoryViewIndexNode):
+ self.lhs.analyse_broadcast_operation(self.rhs)
+ self.lhs = self.lhs.analyse_as_memview_scalar_assignment(self.rhs)
+ elif self.lhs.type.is_array:
+ if not isinstance(self.lhs, ExprNodes.SliceIndexNode):
+ # cannot assign to C array, only to its full slice
+ self.lhs = ExprNodes.SliceIndexNode(self.lhs.pos, base=self.lhs, start=None, stop=None)
+ self.lhs = self.lhs.analyse_target_types(env)
+
+ if self.lhs.type.is_cpp_class:
+ op = env.lookup_operator_for_types(self.pos, '=', [self.lhs.type, self.rhs.type])
+ if op:
+ rhs = self.rhs
+ self.is_overloaded_assignment = True
+ self.exception_check = op.type.exception_check
+ self.exception_value = op.type.exception_value
+ if self.exception_check == '+' and self.exception_value is None:
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ else:
+ rhs = self.rhs.coerce_to(self.lhs.type, env)
+ else:
+ rhs = self.rhs.coerce_to(self.lhs.type, env)
+
+ if use_temp or rhs.is_attribute or (
+ not rhs.is_name and not rhs.is_literal and
+ rhs.type.is_pyobject):
+ # things like (cdef) attribute access are not safe (traverses pointers)
+ rhs = rhs.coerce_to_temp(env)
+ elif rhs.type.is_pyobject:
+ rhs = rhs.coerce_to_simple(env)
+ self.rhs = rhs
+ return self
+
+ def unroll(self, node, target_size, env):
+ from . import ExprNodes, UtilNodes
+
+ base = node
+ start_node = stop_node = step_node = check_node = None
+
+ if node.type.is_ctuple:
+ slice_size = node.type.size
+
+ elif node.type.is_ptr or node.type.is_array:
+ while isinstance(node, ExprNodes.SliceIndexNode) and not (node.start or node.stop):
+ base = node = node.base
+ if isinstance(node, ExprNodes.SliceIndexNode):
+ base = node.base
+ start_node = node.start
+ if start_node:
+ start_node = start_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+ stop_node = node.stop
+ if stop_node:
+ stop_node = stop_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+ else:
+ if node.type.is_array and node.type.size:
+ stop_node = ExprNodes.IntNode(
+ self.pos, value=str(node.type.size),
+ constant_result=(node.type.size if isinstance(node.type.size, _py_int_types)
+ else ExprNodes.constant_value_not_set))
+ else:
+ error(self.pos, "C array iteration requires known end index")
+ return
+ step_node = None #node.step
+ if step_node:
+ step_node = step_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+
+ # TODO: Factor out SliceIndexNode.generate_slice_guard_code() for use here.
+ def get_const(node, none_value):
+ if node is None:
+ return none_value
+ elif node.has_constant_result():
+ return node.constant_result
+ else:
+ raise ValueError("Not a constant.")
+
+ try:
+ slice_size = (get_const(stop_node, None) - get_const(start_node, 0)) / get_const(step_node, 1)
+ except ValueError:
+ error(self.pos, "C array assignment currently requires known endpoints")
+ return
+
+ elif node.type.is_array:
+ slice_size = node.type.size
+ if not isinstance(slice_size, _py_int_types):
+ return # might still work when coercing to Python
+ else:
+ return
+
+ else:
+ return
+
+ if slice_size != target_size:
+ error(self.pos, "Assignment to/from slice of wrong length, expected %s, got %s" % (
+ slice_size, target_size))
+ return
+
+ items = []
+ base = UtilNodes.LetRefNode(base)
+ refs = [base]
+ if start_node and not start_node.is_literal:
+ start_node = UtilNodes.LetRefNode(start_node)
+ refs.append(start_node)
+ if stop_node and not stop_node.is_literal:
+ stop_node = UtilNodes.LetRefNode(stop_node)
+ refs.append(stop_node)
+ if step_node and not step_node.is_literal:
+ step_node = UtilNodes.LetRefNode(step_node)
+ refs.append(step_node)
+
+ for ix in range(target_size):
+ ix_node = ExprNodes.IntNode(self.pos, value=str(ix), constant_result=ix, type=PyrexTypes.c_py_ssize_t_type)
+ if step_node is not None:
+ if step_node.has_constant_result():
+ step_value = ix_node.constant_result * step_node.constant_result
+ ix_node = ExprNodes.IntNode(self.pos, value=str(step_value), constant_result=step_value)
+ else:
+ ix_node = ExprNodes.MulNode(self.pos, operator='*', operand1=step_node, operand2=ix_node)
+ if start_node is not None:
+ if start_node.has_constant_result() and ix_node.has_constant_result():
+ index_value = ix_node.constant_result + start_node.constant_result
+ ix_node = ExprNodes.IntNode(self.pos, value=str(index_value), constant_result=index_value)
+ else:
+ ix_node = ExprNodes.AddNode(
+ self.pos, operator='+', operand1=start_node, operand2=ix_node)
+ items.append(ExprNodes.IndexNode(self.pos, base=base, index=ix_node.analyse_types(env)))
+ return check_node, refs, items
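+
+ # Illustrative case that the unrolling above enables (assumed usage):
+ #
+ #     cdef int a[3]
+ #     x, y, z = a       # unrolled to x = a[0]; y = a[1]; z = a[2]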
+
+ def unroll_assignments(self, refs, check_node, lhs_list, rhs_list, env):
+ from . import UtilNodes
+ assignments = []
+ for lhs, rhs in zip(lhs_list, rhs_list):
+ assignments.append(SingleAssignmentNode(self.pos, lhs=lhs, rhs=rhs, first=self.first))
+ node = ParallelAssignmentNode(pos=self.pos, stats=assignments).analyse_expressions(env)
+ if check_node:
+ node = StatListNode(pos=self.pos, stats=[check_node, node])
+ for ref in refs[::-1]:
+ node = UtilNodes.LetNode(ref, node)
+ return node
+
+ def unroll_rhs(self, env):
+ from . import ExprNodes
+ if not isinstance(self.lhs, ExprNodes.TupleNode):
+ return
+ if any(arg.is_starred for arg in self.lhs.args):
+ return
+
+ unrolled = self.unroll(self.rhs, len(self.lhs.args), env)
+ if not unrolled:
+ return
+ check_node, refs, rhs = unrolled
+ return self.unroll_assignments(refs, check_node, self.lhs.args, rhs, env)
+
+ def unroll_lhs(self, env):
+ if self.lhs.type.is_ctuple:
+ # Handled directly.
+ return
+ from . import ExprNodes
+ if not isinstance(self.rhs, ExprNodes.TupleNode):
+ return
+
+ unrolled = self.unroll(self.lhs, len(self.rhs.args), env)
+ if not unrolled:
+ return
+ check_node, refs, lhs = unrolled
+ return self.unroll_assignments(refs, check_node, lhs, self.rhs.args, env)
+
+ def generate_rhs_evaluation_code(self, code):
+ self.rhs.generate_evaluation_code(code)
+
+ def generate_assignment_code(self, code, overloaded_assignment=False):
+ if self.is_overloaded_assignment:
+ self.lhs.generate_assignment_code(
+ self.rhs,
+ code,
+ overloaded_assignment=self.is_overloaded_assignment,
+ exception_check=self.exception_check,
+ exception_value=self.exception_value)
+ else:
+ self.lhs.generate_assignment_code(self.rhs, code)
+
+ def generate_function_definitions(self, env, code):
+ self.rhs.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.lhs.annotate(code)
+ self.rhs.annotate(code)
+
+
+class CascadedAssignmentNode(AssignmentNode):
+ # An assignment with multiple left hand sides:
+ #
+ # a = b = c
+ #
+ # lhs_list [ExprNode] Left hand sides
+ # rhs ExprNode Right hand side
+ #
+ # Used internally:
+ #
+ # coerced_values [ExprNode] RHS coerced to all distinct LHS types
+ # cloned_values [ExprNode] cloned RHS value for each LHS
+ # assignment_overloads [Bool] If each assignment uses a C++ operator=
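+ #
+ # e.g. for "i = d = expr" with an int target and a double target, the RHS
+ # is evaluated once, coerced once per distinct LHS type, and a CloneNode
+ # of the coerced value is assigned per target.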
+
+ child_attrs = ["lhs_list", "rhs", "coerced_values", "cloned_values"]
+ cloned_values = None
+ coerced_values = None
+ assignment_overloads = None
+
+ def analyse_declarations(self, env):
+ for lhs in self.lhs_list:
+ lhs.analyse_target_declaration(env)
+
+ def analyse_types(self, env, use_temp=0):
+ from .ExprNodes import CloneNode, ProxyNode
+
+ # collect distinct types used on the LHS
+ lhs_types = set()
+ for i, lhs in enumerate(self.lhs_list):
+ lhs = self.lhs_list[i] = lhs.analyse_target_types(env)
+ lhs.gil_assignment_check(env)
+ lhs_types.add(lhs.type)
+
+ rhs = self.rhs.analyse_types(env)
+ # common special case: only one type needed on the LHS => coerce only once
+ if len(lhs_types) == 1:
+ # Avoid coercion for overloaded assignment operators.
+ if next(iter(lhs_types)).is_cpp_class:
+ op = env.lookup_operator('=', [lhs, self.rhs])
+ if not op:
+ rhs = rhs.coerce_to(lhs_types.pop(), env)
+ else:
+ rhs = rhs.coerce_to(lhs_types.pop(), env)
+
+ if not rhs.is_name and not rhs.is_literal and (
+ use_temp or rhs.is_attribute or rhs.type.is_pyobject):
+ rhs = rhs.coerce_to_temp(env)
+ else:
+ rhs = rhs.coerce_to_simple(env)
+ self.rhs = ProxyNode(rhs) if rhs.is_temp else rhs
+
+ # clone RHS and coerce it to all distinct LHS types
+ self.coerced_values = []
+ coerced_values = {}
+ self.assignment_overloads = []
+ for lhs in self.lhs_list:
+ overloaded = lhs.type.is_cpp_class and env.lookup_operator('=', [lhs, self.rhs])
+ self.assignment_overloads.append(overloaded)
+ if lhs.type not in coerced_values and lhs.type != rhs.type:
+ rhs = CloneNode(self.rhs)
+ if not overloaded:
+ rhs = rhs.coerce_to(lhs.type, env)
+ self.coerced_values.append(rhs)
+ coerced_values[lhs.type] = rhs
+
+ # clone coerced values for all LHS assignments
+ self.cloned_values = []
+ for lhs in self.lhs_list:
+ rhs = coerced_values.get(lhs.type, self.rhs)
+ self.cloned_values.append(CloneNode(rhs))
+ return self
+
+ def generate_rhs_evaluation_code(self, code):
+ self.rhs.generate_evaluation_code(code)
+
+ def generate_assignment_code(self, code, overloaded_assignment=False):
+ # prepare all coercions
+ for rhs in self.coerced_values:
+ rhs.generate_evaluation_code(code)
+ # assign clones to LHS
+ for lhs, rhs, overload in zip(self.lhs_list, self.cloned_values, self.assignment_overloads):
+ rhs.generate_evaluation_code(code)
+ lhs.generate_assignment_code(rhs, code, overloaded_assignment=overload)
+ # dispose of coerced values and original RHS
+ for rhs_value in self.coerced_values:
+ rhs_value.generate_disposal_code(code)
+ rhs_value.free_temps(code)
+ self.rhs.generate_disposal_code(code)
+ self.rhs.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.rhs.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ for rhs in self.coerced_values:
+ rhs.annotate(code)
+ for lhs, rhs in zip(self.lhs_list, self.cloned_values):
+ lhs.annotate(code)
+ rhs.annotate(code)
+ self.rhs.annotate(code)
+
+
+class ParallelAssignmentNode(AssignmentNode):
+ # A combined packing/unpacking assignment:
+ #
+ # a, b, c = d, e, f
+ #
+ # This has been rearranged by the parser into
+ #
+ # a = d ; b = e ; c = f
+ #
+ # but we must evaluate all the right hand sides
+ # before assigning to any of the left hand sides.
+ #
+ # stats [AssignmentNode] The constituent assignments
+
+ child_attrs = ["stats"]
+
+ def analyse_declarations(self, env):
+ for stat in self.stats:
+ stat.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.stats = [stat.analyse_types(env, use_temp=1)
+ for stat in self.stats]
+ return self
+
+# def analyse_expressions(self, env):
+# for stat in self.stats:
+# stat.analyse_expressions_1(env, use_temp=1)
+# for stat in self.stats:
+# stat.analyse_expressions_2(env)
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ for stat in self.stats:
+ stat.generate_rhs_evaluation_code(code)
+ for stat in self.stats:
+ stat.generate_assignment_code(code)
+
+ def generate_function_definitions(self, env, code):
+ for stat in self.stats:
+ stat.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ for stat in self.stats:
+ stat.annotate(code)
+
+
+class InPlaceAssignmentNode(AssignmentNode):
+ # An in-place arithmetic assignment:
+ #
+ # a += b
+ # a -= b
+ # ...
+ #
+ # lhs ExprNode Left hand side
+ # rhs ExprNode Right hand side
+ # operator char one of "+-*/%^&|"
+ #
+ # This code is a bit tricky because in order to obey Python
+ # semantics the sub-expressions (e.g. indices) of the lhs must
+ # not be evaluated twice. So we must re-use the values calculated
+ # in evaluation phase for the assignment phase as well.
+ # Fortunately, the type of the lhs node is fairly constrained
+ # (it must be a NameNode, AttributeNode, or IndexNode).
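+ #
+ # e.g. in "a[f(i)] += 1", f(i) must be evaluated exactly once; the index
+ # temps from the evaluation phase are reused when storing the result.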
+
+ child_attrs = ["lhs", "rhs"]
+
+ def analyse_declarations(self, env):
+ self.lhs.analyse_target_declaration(env)
+
+ def analyse_types(self, env):
+ self.rhs = self.rhs.analyse_types(env)
+ self.lhs = self.lhs.analyse_target_types(env)
+
+ # When assigning to a fully indexed buffer or memoryview, coerce the rhs
+ if self.lhs.is_memview_index or self.lhs.is_buffer_access:
+ self.rhs = self.rhs.coerce_to(self.lhs.type, env)
+ elif self.lhs.type.is_string and self.operator in '+-':
+ # use pointer arithmetic for char* LHS instead of string concat
+ self.rhs = self.rhs.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ lhs, rhs = self.lhs, self.rhs
+ rhs.generate_evaluation_code(code)
+ lhs.generate_subexpr_evaluation_code(code)
+ c_op = self.operator
+ if c_op == "//":
+ c_op = "/"
+ elif c_op == "**":
+ error(self.pos, "No C inplace power operator")
+ if lhs.is_buffer_access or lhs.is_memview_index:
+ if lhs.type.is_pyobject:
+ error(self.pos, "In-place operators not allowed on object buffers in this release.")
+ if c_op in ('/', '%') and lhs.type.is_int and not code.globalstate.directives['cdivision']:
+ error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
+ lhs.generate_buffer_setitem_code(rhs, code, c_op)
+ elif lhs.is_memview_slice:
+ error(self.pos, "Inplace operators not supported on memoryview slices")
+ else:
+ # C++
+ # TODO: make sure overload is declared
+ code.putln("%s %s= %s;" % (lhs.result(), c_op, rhs.result()))
+ lhs.generate_subexpr_disposal_code(code)
+ lhs.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def annotate(self, code):
+ self.lhs.annotate(code)
+ self.rhs.annotate(code)
+
+ def create_binop_node(self):
+ from . import ExprNodes
+ return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
+
+
+class PrintStatNode(StatNode):
+ # print statement
+ #
+ # arg_tuple TupleNode
+ # stream ExprNode or None (stdout)
+ # append_newline boolean
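+ #
+ # "print x" with a single argument and a trailing newline is routed to
+ # the specialised __Pyx_PrintOne helper; all other forms go through
+ # __Pyx_Print (see generate_execution_code below).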
+
+ child_attrs = ["arg_tuple", "stream"]
+
+ def analyse_expressions(self, env):
+ if self.stream:
+ stream = self.stream.analyse_expressions(env)
+ self.stream = stream.coerce_to_pyobject(env)
+ arg_tuple = self.arg_tuple.analyse_expressions(env)
+ self.arg_tuple = arg_tuple.coerce_to_pyobject(env)
+ env.use_utility_code(printing_utility_code)
+ if len(self.arg_tuple.args) == 1 and self.append_newline:
+ env.use_utility_code(printing_one_utility_code)
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Python print statement"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ if self.stream:
+ self.stream.generate_evaluation_code(code)
+ stream_result = self.stream.py_result()
+ else:
+ stream_result = '0'
+ if len(self.arg_tuple.args) == 1 and self.append_newline:
+ arg = self.arg_tuple.args[0]
+ arg.generate_evaluation_code(code)
+
+ code.putln(
+ "if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
+ stream_result,
+ arg.py_result(),
+ code.error_goto(self.pos)))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ else:
+ self.arg_tuple.generate_evaluation_code(code)
+ code.putln(
+ "if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
+ stream_result,
+ self.arg_tuple.py_result(),
+ self.append_newline,
+ code.error_goto(self.pos)))
+ self.arg_tuple.generate_disposal_code(code)
+ self.arg_tuple.free_temps(code)
+
+ if self.stream:
+ self.stream.generate_disposal_code(code)
+ self.stream.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ if self.stream:
+ self.stream.generate_function_definitions(env, code)
+ self.arg_tuple.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ if self.stream:
+ self.stream.annotate(code)
+ self.arg_tuple.annotate(code)
+
+
+class ExecStatNode(StatNode):
+ # exec statement
+ #
+ # args [ExprNode]
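+ #
+ # "exec code [in globals[, locals]]" takes at most three arguments;
+ # missing ones are padded with NULL ('0') before calling __Pyx_PyExec3.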
+
+ child_attrs = ["args"]
+
+ def analyse_expressions(self, env):
+ for i, arg in enumerate(self.args):
+ arg = arg.analyse_expressions(env)
+ arg = arg.coerce_to_pyobject(env)
+ self.args[i] = arg
+ env.use_utility_code(Builtin.pyexec_utility_code)
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Python exec statement"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ args = []
+ for arg in self.args:
+ arg.generate_evaluation_code(code)
+ args.append(arg.py_result())
+ args = tuple(args + ['0', '0'][:3-len(args)])
+ temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
+ code.putln("%s = __Pyx_PyExec3(%s, %s, %s);" % ((temp_result,) + args))
+ for arg in self.args:
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ code.putln(
+ code.error_goto_if_null(temp_result, self.pos))
+ code.put_gotref(temp_result)
+ code.put_decref_clear(temp_result, py_object_type)
+ code.funcstate.release_temp(temp_result)
+
+ def annotate(self, code):
+ for arg in self.args:
+ arg.annotate(code)
+
+
+class DelStatNode(StatNode):
+ # del statement
+ #
+ # args [ExprNode]
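+ #
+ # e.g. "del obj.attr, d[key], ptr" where ptr may be a pointer to a C++
+ # object (released via C++ 'delete', see generate_execution_code).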
+
+ child_attrs = ["args"]
+ ignore_nonexisting = False
+
+ def analyse_declarations(self, env):
+ for arg in self.args:
+ arg.analyse_target_declaration(env)
+
+ def analyse_expressions(self, env):
+ for i, arg in enumerate(self.args):
+ arg = self.args[i] = arg.analyse_target_expression(env, None)
+ if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice):
+ if arg.is_name and arg.entry.is_cglobal:
+ error(arg.pos, "Deletion of global C variable")
+ elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
+ self.cpp_check(env)
+ elif arg.type.is_cpp_class:
+ error(arg.pos, "Deletion of non-heap C++ object")
+ elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
+ pass # del ba[i]
+ else:
+ error(arg.pos, "Deletion of non-Python, non-C++ object")
+ #arg.release_target_temp(env)
+ return self
+
+ def nogil_check(self, env):
+ for arg in self.args:
+ if arg.type.is_pyobject:
+ self.gil_error()
+
+ gil_message = "Deleting Python object"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ for arg in self.args:
+ if (arg.type.is_pyobject or
+ arg.type.is_memoryviewslice or
+ arg.is_subscript and arg.base.type is Builtin.bytearray_type):
+ arg.generate_deletion_code(
+ code, ignore_nonexisting=self.ignore_nonexisting)
+ elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
+ arg.generate_evaluation_code(code)
+ code.putln("delete %s;" % arg.result())
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ # else error reported earlier
+
+ def annotate(self, code):
+ for arg in self.args:
+ arg.annotate(code)
+
+
+class PassStatNode(StatNode):
+ # pass statement
+
+ child_attrs = []
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class IndirectionNode(StatListNode):
+ """
+ This adds an indirection so that the node can be shared and a subtree can
+ be removed at any time by clearing self.stats.
+ """
+
+ def __init__(self, stats):
+ super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
+
+
+class BreakStatNode(StatNode):
+
+ child_attrs = []
+ is_terminator = True
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ if not code.break_label:
+ error(self.pos, "break statement not inside loop")
+ else:
+ code.put_goto(code.break_label)
+
+
+class ContinueStatNode(StatNode):
+
+ child_attrs = []
+ is_terminator = True
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ if not code.continue_label:
+ error(self.pos, "continue statement not inside loop")
+ return
+ code.mark_pos(self.pos)
+ code.put_goto(code.continue_label)
+
+
+class ReturnStatNode(StatNode):
+ # return statement
+ #
+ # value ExprNode or None
+ # return_type PyrexType
+ # in_generator return inside of generator => raise StopIteration
+ # in_async_gen return inside of async generator
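+ #
+ # e.g. (illustrative):
+ #
+ #     def gen():
+ #         yield 1
+ #         return 42    # lowered to an uncatchable StopIteration(42)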
+
+ child_attrs = ["value"]
+ is_terminator = True
+ in_generator = False
+ in_async_gen = False
+
+ # Whether we are in a parallel section
+ in_parallel = False
+
+ def analyse_expressions(self, env):
+ return_type = env.return_type
+ self.return_type = return_type
+ if not return_type:
+ error(self.pos, "Return not inside a function body")
+ return self
+ if self.value:
+ if self.in_async_gen:
+ error(self.pos, "Return with value in async generator")
+ self.value = self.value.analyse_types(env)
+ if return_type.is_void or return_type.is_returncode:
+ error(self.value.pos, "Return with value in void function")
+ else:
+ self.value = self.value.coerce_to(env.return_type, env)
+ else:
+ if (not return_type.is_void
+ and not return_type.is_pyobject
+ and not return_type.is_returncode):
+ error(self.pos, "Return value required")
+ return self
+
+ def nogil_check(self, env):
+ if self.return_type.is_pyobject:
+ self.gil_error()
+
+ gil_message = "Returning Python object"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ if not self.return_type:
+ # error reported earlier
+ return
+
+ value = self.value
+ if self.return_type.is_pyobject:
+ code.put_xdecref(Naming.retval_cname, self.return_type)
+ if value and value.is_none:
+ # Use specialised default handling for "return None".
+ value = None
+
+ if value:
+ value.generate_evaluation_code(code)
+ if self.return_type.is_memoryviewslice:
+ from . import MemoryView
+ MemoryView.put_acquire_memoryviewslice(
+ lhs_cname=Naming.retval_cname,
+ lhs_type=self.return_type,
+ lhs_pos=value.pos,
+ rhs=value,
+ code=code,
+ have_gil=self.in_nogil_context)
+ value.generate_post_assignment_code(code)
+ elif self.in_generator:
+ # return value == raise StopIteration(value), but uncatchable
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
+ code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
+ Naming.retval_cname,
+ value.py_result()))
+ value.generate_disposal_code(code)
+ else:
+ value.make_owned_reference(code)
+ code.putln("%s = %s;" % (
+ Naming.retval_cname,
+ value.result_as(self.return_type)))
+ value.generate_post_assignment_code(code)
+ value.free_temps(code)
+ else:
+ if self.return_type.is_pyobject:
+ if self.in_generator:
+ if self.in_async_gen:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
+ code.putln("%s = NULL;" % Naming.retval_cname)
+ else:
+ code.put_init_to_py_none(Naming.retval_cname, self.return_type)
+ elif self.return_type.is_returncode:
+ self.put_return(code, self.return_type.default_value)
+
+ for cname, type in code.funcstate.temps_holding_reference():
+ code.put_decref_clear(cname, type)
+
+ code.put_goto(code.return_label)
+
+ def put_return(self, code, value):
+ if self.in_parallel:
+ code.putln_openmp("#pragma omp critical(__pyx_returning)")
+ code.putln("%s = %s;" % (Naming.retval_cname, value))
+
+ def generate_function_definitions(self, env, code):
+ if self.value is not None:
+ self.value.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ if self.value:
+ self.value.annotate(code)
+
+
+class RaiseStatNode(StatNode):
+ # raise statement
+ #
+ # exc_type ExprNode or None
+ # exc_value ExprNode or None
+ # exc_tb ExprNode or None
+ # cause ExprNode or None
+
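+    # Illustrative sketch: "raise exc", "raise exc, value, tb" (Py2 style) and
+    # "raise exc from cause" all funnel into a single
+    # __Pyx_Raise(type, value, tb, cause) call below; a bare
+    # "raise MemoryError" is special-cased into a direct PyErr_NoMemory() call.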
+ child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
+ is_terminator = True
+
+ def analyse_expressions(self, env):
+ if self.exc_type:
+ exc_type = self.exc_type.analyse_types(env)
+ self.exc_type = exc_type.coerce_to_pyobject(env)
+ if self.exc_value:
+ exc_value = self.exc_value.analyse_types(env)
+ self.exc_value = exc_value.coerce_to_pyobject(env)
+ if self.exc_tb:
+ exc_tb = self.exc_tb.analyse_types(env)
+ self.exc_tb = exc_tb.coerce_to_pyobject(env)
+ if self.cause:
+ cause = self.cause.analyse_types(env)
+ self.cause = cause.coerce_to_pyobject(env)
+ # special cases for builtin exceptions
+ self.builtin_exc_name = None
+ if self.exc_type and not self.exc_value and not self.exc_tb:
+ exc = self.exc_type
+ from . import ExprNodes
+ if (isinstance(exc, ExprNodes.SimpleCallNode) and
+ not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))):
+ exc = exc.function # extract the exception type
+ if exc.is_name and exc.entry.is_builtin:
+ self.builtin_exc_name = exc.name
+ if self.builtin_exc_name == 'MemoryError':
+ self.exc_type = None # has a separate implementation
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Raising exception"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ if self.builtin_exc_name == 'MemoryError':
+ code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
+ return
+
+ if self.exc_type:
+ self.exc_type.generate_evaluation_code(code)
+ type_code = self.exc_type.py_result()
+ if self.exc_type.is_name:
+ code.globalstate.use_entry_utility_code(self.exc_type.entry)
+ else:
+ type_code = "0"
+ if self.exc_value:
+ self.exc_value.generate_evaluation_code(code)
+ value_code = self.exc_value.py_result()
+ else:
+ value_code = "0"
+ if self.exc_tb:
+ self.exc_tb.generate_evaluation_code(code)
+ tb_code = self.exc_tb.py_result()
+ else:
+ tb_code = "0"
+ if self.cause:
+ self.cause.generate_evaluation_code(code)
+ cause_code = self.cause.py_result()
+ else:
+ cause_code = "0"
+ code.globalstate.use_utility_code(raise_utility_code)
+ code.putln(
+ "__Pyx_Raise(%s, %s, %s, %s);" % (
+ type_code,
+ value_code,
+ tb_code,
+ cause_code))
+ for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
+ if obj:
+ obj.generate_disposal_code(code)
+ obj.free_temps(code)
+ code.putln(
+ code.error_goto(self.pos))
+
+ def generate_function_definitions(self, env, code):
+ if self.exc_type is not None:
+ self.exc_type.generate_function_definitions(env, code)
+ if self.exc_value is not None:
+ self.exc_value.generate_function_definitions(env, code)
+ if self.exc_tb is not None:
+ self.exc_tb.generate_function_definitions(env, code)
+ if self.cause is not None:
+ self.cause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ if self.exc_type:
+ self.exc_type.annotate(code)
+ if self.exc_value:
+ self.exc_value.annotate(code)
+ if self.exc_tb:
+ self.exc_tb.annotate(code)
+ if self.cause:
+ self.cause.annotate(code)
+
+
+class ReraiseStatNode(StatNode):
+
+ child_attrs = []
+ is_terminator = True
+
+ def analyse_expressions(self, env):
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Raising exception"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ vars = code.funcstate.exc_vars
+ if vars:
+ code.globalstate.use_utility_code(restore_exception_utility_code)
+ code.put_giveref(vars[0])
+ code.put_giveref(vars[1])
+ # fresh exceptions may not have a traceback yet (-> finally!)
+ code.put_xgiveref(vars[2])
+ code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % tuple(vars))
+ for varname in vars:
+ code.put("%s = 0; " % varname)
+ code.putln()
+ code.putln(code.error_goto(self.pos))
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ReRaiseException", "Exceptions.c"))
+ code.putln("__Pyx_ReraiseException(); %s" % code.error_goto(self.pos))
+
+
+class AssertStatNode(StatNode):
+ # assert statement
+ #
+ # cond ExprNode
+ # value ExprNode or None
+
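+    # Illustrative sketch of the C code emitted below for "assert cond, msg"
+    # ("error_label" is schematic):
+    #
+    #     #ifndef CYTHON_WITHOUT_ASSERTIONS
+    #     if (unlikely(!Py_OptimizeFlag)) {
+    #         if (unlikely(!cond)) {
+    #             PyErr_SetObject(PyExc_AssertionError, msg);
+    #             goto error_label;
+    #         }
+    #     }
+    #     #endif
+    #
+    # so assertions vanish when compiled with CYTHON_WITHOUT_ASSERTIONS and
+    # are skipped when Python runs with -O.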
+ child_attrs = ["cond", "value"]
+
+ def analyse_expressions(self, env):
+ self.cond = self.cond.analyse_boolean_expression(env)
+ if self.value:
+ value = self.value.analyse_types(env)
+ if value.type is Builtin.tuple_type or not value.type.is_builtin_type:
+ # prevent tuple values from being interpreted as argument value tuples
+ from .ExprNodes import TupleNode
+ value = TupleNode(value.pos, args=[value], slow=True)
+ self.value = value.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
+ else:
+ self.value = value.coerce_to_pyobject(env)
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Raising exception"
+
+ def generate_execution_code(self, code):
+ code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
+ code.putln("if (unlikely(!Py_OptimizeFlag)) {")
+ code.mark_pos(self.pos)
+ self.cond.generate_evaluation_code(code)
+ code.putln(
+ "if (unlikely(!%s)) {" % self.cond.result())
+ if self.value:
+ self.value.generate_evaluation_code(code)
+ code.putln(
+ "PyErr_SetObject(PyExc_AssertionError, %s);" % self.value.py_result())
+ self.value.generate_disposal_code(code)
+ self.value.free_temps(code)
+ else:
+ code.putln(
+ "PyErr_SetNone(PyExc_AssertionError);")
+ code.putln(
+ code.error_goto(self.pos))
+ code.putln(
+ "}")
+ self.cond.generate_disposal_code(code)
+ self.cond.free_temps(code)
+ code.putln(
+ "}")
+ code.putln("#endif")
+
+ def generate_function_definitions(self, env, code):
+ self.cond.generate_function_definitions(env, code)
+ if self.value is not None:
+ self.value.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.cond.annotate(code)
+ if self.value:
+ self.value.annotate(code)
+
+
+class IfStatNode(StatNode):
+ # if statement
+ #
+ # if_clauses [IfClauseNode]
+ # else_clause StatNode or None
+
+ child_attrs = ["if_clauses", "else_clause"]
+
+ def analyse_declarations(self, env):
+ for if_clause in self.if_clauses:
+ if_clause.analyse_declarations(env)
+ if self.else_clause:
+ self.else_clause.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.if_clauses = [if_clause.analyse_expressions(env) for if_clause in self.if_clauses]
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ end_label = code.new_label()
+ last = len(self.if_clauses)
+ if self.else_clause:
+ # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
+ self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
+ else:
+ last -= 1 # avoid redundant goto at end of last if-clause
+ for i, if_clause in enumerate(self.if_clauses):
+ self._set_branch_hint(if_clause, if_clause.body)
+ if_clause.generate_execution_code(code, end_label, is_last=i == last)
+ if self.else_clause:
+ code.mark_pos(self.else_clause.pos)
+ code.putln("/*else*/ {")
+ self.else_clause.generate_execution_code(code)
+ code.putln("}")
+ code.put_label(end_label)
+
+ def _set_branch_hint(self, clause, statements_node, inverse=False):
+ if not statements_node.is_terminator:
+ return
+ if not isinstance(statements_node, StatListNode) or not statements_node.stats:
+ return
+ # Anything that unconditionally raises exceptions should be considered unlikely.
+ if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
+ if len(statements_node.stats) > 1:
+ # Allow simple statements before the 'raise', but no conditions, loops, etc.
+ non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
+ for node in statements_node.stats[:-1]:
+ if not isinstance(node, non_branch_nodes):
+ return
+ clause.branch_hint = 'likely' if inverse else 'unlikely'
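+    # Illustrative effect: in "if x is None: raise ValueError(...)" the body
+    # is a terminator, so the condition is emitted as "if (unlikely(...))";
+    # with inverse=True, a raising 'else' clause instead marks the preceding
+    # condition as "likely".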
+
+ def generate_function_definitions(self, env, code):
+ for clause in self.if_clauses:
+ clause.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ for if_clause in self.if_clauses:
+ if_clause.annotate(code)
+ if self.else_clause:
+ self.else_clause.annotate(code)
+
+
+class IfClauseNode(Node):
+ # if or elif clause in an if statement
+ #
+ # condition ExprNode
+ # body StatNode
+
+ child_attrs = ["condition", "body"]
+ branch_hint = None
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.condition = self.condition.analyse_temp_boolean_expression(env)
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code, end_label, is_last):
+ self.condition.generate_evaluation_code(code)
+ code.mark_pos(self.pos)
+ condition = self.condition.result()
+ if self.branch_hint:
+ condition = '%s(%s)' % (self.branch_hint, condition)
+ code.putln("if (%s) {" % condition)
+ self.condition.generate_disposal_code(code)
+ self.condition.free_temps(code)
+ self.body.generate_execution_code(code)
+ code.mark_pos(self.pos, trace=False)
+ if not (is_last or self.body.is_terminator):
+ code.put_goto(end_label)
+ code.putln("}")
+
+ def generate_function_definitions(self, env, code):
+ self.condition.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.condition.annotate(code)
+ self.body.annotate(code)
+
+
+class SwitchCaseNode(StatNode):
+ # Generated in the optimization of an if-elif-else node
+ #
+ # conditions [ExprNode]
+ # body StatNode
+
+ child_attrs = ['conditions', 'body']
+
+ def generate_condition_evaluation_code(self, code):
+ for cond in self.conditions:
+ cond.generate_evaluation_code(code)
+
+ def generate_execution_code(self, code):
+ num_conditions = len(self.conditions)
+ line_tracing_enabled = code.globalstate.directives['linetrace']
+ for i, cond in enumerate(self.conditions, 1):
+ code.putln("case %s:" % cond.result())
+ code.mark_pos(cond.pos) # Tracing code must appear *after* the 'case' statement.
+ if line_tracing_enabled and i < num_conditions:
+ # Allow fall-through after the line tracing code.
+ code.putln('CYTHON_FALLTHROUGH;')
+ self.body.generate_execution_code(code)
+ code.mark_pos(self.pos, trace=False)
+ code.putln("break;")
+
+ def generate_function_definitions(self, env, code):
+ for cond in self.conditions:
+ cond.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ for cond in self.conditions:
+ cond.annotate(code)
+ self.body.annotate(code)
+
+
+class SwitchStatNode(StatNode):
+ # Generated in the optimization of an if-elif-else node
+ #
+ # test ExprNode
+ # cases [SwitchCaseNode]
+ # else_clause StatNode or None
+
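+    # Illustrative sketch (names hypothetical): an if/elif chain comparing a
+    # C integer "x" against constants, e.g.
+    #     if x == 1: A
+    #     elif x == 2 or x == 3: B
+    #     else: C
+    # is lowered to
+    #     switch (x) { case 1: A; break; case 2: case 3: B; break; default: C; break; }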
+ child_attrs = ['test', 'cases', 'else_clause']
+
+ def generate_execution_code(self, code):
+ self.test.generate_evaluation_code(code)
+ # Make sure all conditions are evaluated before going into the switch() statement.
+ # This is required in order to prevent any execution code from leaking into the space between the cases.
+ for case in self.cases:
+ case.generate_condition_evaluation_code(code)
+ code.mark_pos(self.pos)
+ code.putln("switch (%s) {" % self.test.result())
+ for case in self.cases:
+ case.generate_execution_code(code)
+ if self.else_clause is not None:
+ code.putln("default:")
+ self.else_clause.generate_execution_code(code)
+ code.putln("break;")
+ else:
+            # Always generate a default clause to prevent C compiler warnings
+            # about unmatched enum values (the user did not ask for the switch
+            # statement to be generated, so they should not be bothered about it).
+ code.putln("default: break;")
+ code.putln("}")
+ self.test.generate_disposal_code(code)
+ self.test.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.test.generate_function_definitions(env, code)
+ for case in self.cases:
+ case.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.test.annotate(code)
+ for case in self.cases:
+ case.annotate(code)
+ if self.else_clause is not None:
+ self.else_clause.annotate(code)
+
+
+class LoopNode(object):
+ pass
+
+
+class WhileStatNode(LoopNode, StatNode):
+ # while statement
+ #
+ # condition ExprNode
+ # body StatNode
+ # else_clause StatNode
+
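+    # Illustrative sketch of the emitted C for "while cond: BODY else: ELSE":
+    #
+    #     while (1) { if (!cond) break; BODY continue_label:; }
+    #     /*else*/ { ELSE }
+    #     break_label:;
+    #
+    # A Cython "break" jumps directly to break_label and skips ELSE, while
+    # normal loop termination falls through into it.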
+ child_attrs = ["condition", "body", "else_clause"]
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+ if self.else_clause:
+ self.else_clause.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ if self.condition:
+ self.condition = self.condition.analyse_temp_boolean_expression(env)
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ old_loop_labels = code.new_loop_labels()
+ code.putln(
+ "while (1) {")
+ if self.condition:
+ self.condition.generate_evaluation_code(code)
+ self.condition.generate_disposal_code(code)
+ code.putln(
+ "if (!%s) break;" % self.condition.result())
+ self.condition.free_temps(code)
+ self.body.generate_execution_code(code)
+ code.put_label(code.continue_label)
+ code.putln("}")
+ break_label = code.break_label
+ code.set_loop_labels(old_loop_labels)
+ if self.else_clause:
+ code.mark_pos(self.else_clause.pos)
+ code.putln("/*else*/ {")
+ self.else_clause.generate_execution_code(code)
+ code.putln("}")
+ code.put_label(break_label)
+
+ def generate_function_definitions(self, env, code):
+ if self.condition:
+ self.condition.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ if self.condition:
+ self.condition.annotate(code)
+ self.body.annotate(code)
+ if self.else_clause:
+ self.else_clause.annotate(code)
+
+
+class DictIterationNextNode(Node):
+ # Helper node for calling PyDict_Next() inside of a WhileStatNode
+ # and checking the dictionary size for changes. Created in
+ # Optimize.py.
+ child_attrs = ['dict_obj', 'expected_size', 'pos_index_var',
+ 'coerced_key_var', 'coerced_value_var', 'coerced_tuple_var',
+ 'key_target', 'value_target', 'tuple_target', 'is_dict_flag']
+
+ coerced_key_var = key_ref = None
+ coerced_value_var = value_ref = None
+ coerced_tuple_var = tuple_ref = None
+
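+    # Illustrative sketch (names hypothetical): Optimize.py rewrites
+    #     for key, value in d.items(): ...
+    # into a while loop guarded by this node, which emits a call like
+    #     __Pyx_dict_iter_next(d, expected_size, &pos, &key, &value, NULL, is_dict)
+    # that returns 0 once the iteration is exhausted and -1 on error, e.g.
+    # when the dict changed size during iteration.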
+ def __init__(self, dict_obj, expected_size, pos_index_var,
+ key_target, value_target, tuple_target, is_dict_flag):
+ Node.__init__(
+ self, dict_obj.pos,
+ dict_obj=dict_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ key_target=key_target,
+ value_target=value_target,
+ tuple_target=tuple_target,
+ is_dict_flag=is_dict_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.dict_obj = self.dict_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ if self.pos_index_var:
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ if self.key_target:
+ self.key_target = self.key_target.analyse_target_types(env)
+ self.key_ref = ExprNodes.TempNode(self.key_target.pos, PyrexTypes.py_object_type)
+ self.coerced_key_var = self.key_ref.coerce_to(self.key_target.type, env)
+ if self.value_target:
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ if self.tuple_target:
+ self.tuple_target = self.tuple_target.analyse_target_types(env)
+ self.tuple_ref = ExprNodes.TempNode(self.tuple_target.pos, PyrexTypes.py_object_type)
+ self.coerced_tuple_var = self.tuple_ref.coerce_to(self.tuple_target.type, env)
+ self.is_dict_flag = self.is_dict_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.dict_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("dict_iter", "Optimize.c"))
+ self.dict_obj.generate_evaluation_code(code)
+
+ assignments = []
+ temp_addresses = []
+ for var, result, target in [(self.key_ref, self.coerced_key_var, self.key_target),
+ (self.value_ref, self.coerced_value_var, self.value_target),
+ (self.tuple_ref, self.coerced_tuple_var, self.tuple_target)]:
+ if target is None:
+ addr = 'NULL'
+ else:
+ assignments.append((var, result, target))
+ var.allocate(code)
+ addr = '&%s' % var.result()
+ temp_addresses.append(addr)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_dict_iter_next(%s, %s, &%s, %s, %s, %s, %s);" % (
+ result_temp,
+ self.dict_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ temp_addresses[0],
+ temp_addresses[1],
+ temp_addresses[2],
+ self.is_dict_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ for var, result, target in assignments:
+ code.put_gotref(var.result())
+ for var, result, target in assignments:
+ result.generate_evaluation_code(code)
+ for var, result, target in assignments:
+ target.generate_assignment_code(result, code)
+ var.release(code)
+
+
+class SetIterationNextNode(Node):
+ # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
+ # and checking the set size for changes. Created in Optimize.py.
+ child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
+ 'coerced_value_var', 'value_target', 'is_set_flag']
+
+ coerced_value_var = value_ref = None
+
+ def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
+ Node.__init__(
+ self, set_obj.pos,
+ set_obj=set_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ value_target=value_target,
+ is_set_flag=is_set_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.set_obj = self.set_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ self.is_set_flag = self.is_set_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.set_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
+ self.set_obj.generate_evaluation_code(code)
+
+ value_ref = self.value_ref
+ value_ref.allocate(code)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
+ result_temp,
+ self.set_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ value_ref.result(),
+ self.is_set_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ code.put_gotref(value_ref.result())
+ self.coerced_value_var.generate_evaluation_code(code)
+ self.value_target.generate_assignment_code(self.coerced_value_var, code)
+ value_ref.release(code)
+
+
+def ForStatNode(pos, **kw):
+ if 'iterator' in kw:
+ if kw['iterator'].is_async:
+ return AsyncForStatNode(pos, **kw)
+ else:
+ return ForInStatNode(pos, **kw)
+ else:
+ return ForFromStatNode(pos, **kw)
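+# Illustrative dispatch: "for x in seq" builds a ForInStatNode, "async for"
+# an AsyncForStatNode, and the legacy "for i from 0 <= i < n" syntax (which
+# has no iterator) a ForFromStatNode.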
+
+
+class _ForInStatNode(LoopNode, StatNode):
+ # Base class of 'for-in' statements.
+ #
+ # target ExprNode
+ # iterator IteratorNode | AIterAwaitExprNode(AsyncIteratorNode)
+ # body StatNode
+ # else_clause StatNode
+ # item NextNode | AwaitExprNode(AsyncNextNode)
+ # is_async boolean true for 'async for' statements
+
+ child_attrs = ["target", "item", "iterator", "body", "else_clause"]
+ item = None
+ is_async = False
+
+ def _create_item_node(self):
+ raise NotImplementedError("must be implemented by subclasses")
+
+ def analyse_declarations(self, env):
+ self.target.analyse_target_declaration(env)
+ self.body.analyse_declarations(env)
+ if self.else_clause:
+ self.else_clause.analyse_declarations(env)
+ self._create_item_node()
+
+ def analyse_expressions(self, env):
+ self.target = self.target.analyse_target_types(env)
+ self.iterator = self.iterator.analyse_expressions(env)
+ self._create_item_node() # must rewrap self.item after analysis
+ self.item = self.item.analyse_expressions(env)
+ if (not self.is_async and
+ (self.iterator.type.is_ptr or self.iterator.type.is_array) and
+ self.target.type.assignable_from(self.iterator.type)):
+ # C array slice optimization.
+ pass
+ else:
+ self.item = self.item.coerce_to(self.target.type, env)
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ old_loop_labels = code.new_loop_labels()
+ self.iterator.generate_evaluation_code(code)
+ code.putln("for (;;) {")
+ self.item.generate_evaluation_code(code)
+ self.target.generate_assignment_code(self.item, code)
+ self.body.generate_execution_code(code)
+ code.mark_pos(self.pos)
+ code.put_label(code.continue_label)
+ code.putln("}")
+ break_label = code.break_label
+ code.set_loop_labels(old_loop_labels)
+
+ if self.else_clause:
+ # In nested loops, the 'else' block can contain 'continue' or 'break'
+ # statements for the outer loop, but we may need to generate cleanup code
+ # before taking those paths, so we intercept them here.
+ orig_exit_labels = (code.continue_label, code.break_label)
+ code.continue_label = code.new_label('outer_continue')
+ code.break_label = code.new_label('outer_break')
+
+ code.putln("/*else*/ {")
+ self.else_clause.generate_execution_code(code)
+ code.putln("}")
+
+ needs_goto_end = not self.else_clause.is_terminator
+ for exit_label, orig_exit_label in zip([code.continue_label, code.break_label], orig_exit_labels):
+ if not code.label_used(exit_label):
+ continue
+ if needs_goto_end:
+ code.put_goto(break_label)
+ needs_goto_end = False
+ code.mark_pos(self.pos)
+ code.put_label(exit_label)
+ self.iterator.generate_disposal_code(code)
+ code.put_goto(orig_exit_label)
+ code.set_loop_labels(old_loop_labels)
+
+ code.mark_pos(self.pos)
+ if code.label_used(break_label):
+ code.put_label(break_label)
+ self.iterator.generate_disposal_code(code)
+ self.iterator.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.target.generate_function_definitions(env, code)
+ self.iterator.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.target.annotate(code)
+ self.iterator.annotate(code)
+ self.body.annotate(code)
+ if self.else_clause:
+ self.else_clause.annotate(code)
+ self.item.annotate(code)
+
+
+class ForInStatNode(_ForInStatNode):
+ # 'for' statement
+
+ is_async = False
+
+ def _create_item_node(self):
+ from .ExprNodes import NextNode
+ self.item = NextNode(self.iterator)
+
+
+class AsyncForStatNode(_ForInStatNode):
+ # 'async for' statement
+ #
+ # iterator AIterAwaitExprNode(AsyncIteratorNode)
+ # item AwaitIterNextExprNode(AsyncIteratorNode)
+
+ is_async = True
+
+ def __init__(self, pos, **kw):
+ assert 'item' not in kw
+ from . import ExprNodes
+ # AwaitExprNodes must appear before running MarkClosureVisitor
+ kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
+ _ForInStatNode.__init__(self, pos, **kw)
+
+ def _create_item_node(self):
+ from . import ExprNodes
+ self.item.arg = ExprNodes.AsyncNextNode(self.iterator)
+
+
+class ForFromStatNode(LoopNode, StatNode):
+ # for name from expr rel name rel expr
+ #
+ # target NameNode
+ # bound1 ExprNode
+ # relation1 string
+ # relation2 string
+ # bound2 ExprNode
+ # step ExprNode or None
+ # body StatNode
+ # else_clause StatNode or None
+ #
+ # Used internally:
+ #
+ # from_range bool
+ # is_py_target bool
+ # loopvar_node ExprNode (usually a NameNode or temp node)
+ # py_loopvar_node PyTempNode or None
+ child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
+
+ is_py_target = False
+ loopvar_node = None
+ py_loopvar_node = None
+ from_range = False
+
+ gil_message = "For-loop using object bounds or target"
+
+ def nogil_check(self, env):
+ for x in (self.target, self.bound1, self.bound2):
+ if x.type.is_pyobject:
+ self.gil_error()
+
+ def analyse_declarations(self, env):
+ self.target.analyse_target_declaration(env)
+ self.body.analyse_declarations(env)
+ if self.else_clause:
+ self.else_clause.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.target = self.target.analyse_target_types(env)
+ self.bound1 = self.bound1.analyse_types(env)
+ self.bound2 = self.bound2.analyse_types(env)
+ if self.step is not None:
+ if isinstance(self.step, ExprNodes.UnaryMinusNode):
+ warning(self.step.pos, "Probable infinite loop in for-from-by statement. "
+ "Consider switching the directions of the relations.", 2)
+ self.step = self.step.analyse_types(env)
+
+ self.set_up_loop(env)
+ target_type = self.target.type
+ if not (target_type.is_pyobject or target_type.is_numeric):
+ error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
+
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def set_up_loop(self, env):
+ from . import ExprNodes
+
+ target_type = self.target.type
+ if target_type.is_numeric:
+ loop_type = target_type
+ else:
+ if target_type.is_enum:
+ warning(self.target.pos,
+ "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
+ loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
+ if not self.bound1.type.is_pyobject:
+ loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
+ if not self.bound2.type.is_pyobject:
+ loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
+ if self.step is not None and not self.step.type.is_pyobject:
+ loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
+ self.bound1 = self.bound1.coerce_to(loop_type, env)
+ self.bound2 = self.bound2.coerce_to(loop_type, env)
+ if not self.bound2.is_literal:
+ self.bound2 = self.bound2.coerce_to_temp(env)
+ if self.step is not None:
+ self.step = self.step.coerce_to(loop_type, env)
+ if not self.step.is_literal:
+ self.step = self.step.coerce_to_temp(env)
+
+ if target_type.is_numeric or target_type.is_enum:
+ self.is_py_target = False
+ if isinstance(self.target, ExprNodes.BufferIndexNode):
+ raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
+ self.loopvar_node = self.target
+ self.py_loopvar_node = None
+ else:
+ self.is_py_target = True
+ c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
+ self.loopvar_node = c_loopvar_node
+ self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ old_loop_labels = code.new_loop_labels()
+ from_range = self.from_range
+ self.bound1.generate_evaluation_code(code)
+ self.bound2.generate_evaluation_code(code)
+ offset, incop = self.relation_table[self.relation1]
+ if self.step is not None:
+ self.step.generate_evaluation_code(code)
+ step = self.step.result()
+ incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
+ else:
+ step = '1'
+
+ from . import ExprNodes
+ if isinstance(self.loopvar_node, ExprNodes.TempNode):
+ self.loopvar_node.allocate(code)
+ if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
+ self.py_loopvar_node.allocate(code)
+
+ loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
+
+ if from_range and not self.is_py_target:
+ loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
+ else:
+ loopvar_name = self.loopvar_node.result()
+ if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
+ # Handle the case where the endpoint of an unsigned int iteration
+ # is within step of 0.
+ code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
+ loopvar_name,
+ self.bound1.result(), offset, step,
+ loopvar_name, self.relation2, self.bound2.result(), step,
+ loopvar_name, incop))
+ else:
+ code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % (
+ loopvar_name,
+ self.bound1.result(), offset,
+ loopvar_name, self.relation2, self.bound2.result(),
+ loopvar_name, incop))
+
+ coerced_loopvar_node = self.py_loopvar_node
+ if coerced_loopvar_node is None and from_range:
+ coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
+ if coerced_loopvar_node is not None:
+ coerced_loopvar_node.generate_evaluation_code(code)
+ self.target.generate_assignment_code(coerced_loopvar_node, code)
+
+ self.body.generate_execution_code(code)
+ code.put_label(code.continue_label)
+
+ if not from_range and self.py_loopvar_node:
+            # This mess makes for..from loops with Python targets behave
+            # exactly like those with C targets with regard to re-assignment
+            # of the loop variable.
+ if self.target.entry.is_pyglobal:
+ # We know target is a NameNode, this is the only ugly case.
+ target_node = ExprNodes.PyTempNode(self.target.pos, None)
+ target_node.allocate(code)
+ interned_cname = code.intern_identifier(self.target.entry.name)
+ if self.target.entry.scope.is_module_scope:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
+ lookup_func = '__Pyx_GetModuleGlobalName(%s, %s); %s'
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
+ lookup_func = '__Pyx_GetNameInClass(%s, {}, %s); %s'.format(
+ self.target.entry.scope.namespace_cname)
+ code.putln(lookup_func % (
+ target_node.result(),
+ interned_cname,
+ code.error_goto_if_null(target_node.result(), self.target.pos)))
+ code.put_gotref(target_node.result())
+ else:
+ target_node = self.target
+ from_py_node = ExprNodes.CoerceFromPyTypeNode(
+ self.loopvar_node.type, target_node, self.target.entry.scope)
+ from_py_node.temp_code = loopvar_name
+ from_py_node.generate_result_code(code)
+ if self.target.entry.is_pyglobal:
+ code.put_decref(target_node.result(), target_node.type)
+ target_node.release(code)
+
+ code.putln("}")
+
+ if not from_range and self.py_loopvar_node:
+            # This is potentially wasteful, but we don't want the semantics to
+            # depend on whether or not the loop variable is a Python object.
+ self.py_loopvar_node.generate_evaluation_code(code)
+ self.target.generate_assignment_code(self.py_loopvar_node, code)
+ if from_range and not self.is_py_target:
+ code.funcstate.release_temp(loopvar_name)
+
+ break_label = code.break_label
+ code.set_loop_labels(old_loop_labels)
+ if self.else_clause:
+ code.putln("/*else*/ {")
+ self.else_clause.generate_execution_code(code)
+ code.putln("}")
+ code.put_label(break_label)
+ self.bound1.generate_disposal_code(code)
+ self.bound1.free_temps(code)
+ self.bound2.generate_disposal_code(code)
+ self.bound2.free_temps(code)
+ if isinstance(self.loopvar_node, ExprNodes.TempNode):
+ self.loopvar_node.release(code)
+ if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
+ self.py_loopvar_node.release(code)
+ if self.step is not None:
+ self.step.generate_disposal_code(code)
+ self.step.free_temps(code)
+
+ relation_table = {
+ # {relop : (initial offset, increment op)}
+ '<=': ("", "++"),
+ '<' : ("+1", "++"),
+ '>=': ("", "--"),
+ '>' : ("-1", "--"),
+ }
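+    # Illustrative sketch: "for i from 0 <= i < n by 2" uses relation1 '<='
+    # and relation2 '<' and emits roughly
+    #     for (i = 0; i < n; i += 2) { ... }
+    # while a strict lower bound, as in "for i from 0 < i <= n", contributes
+    # the "+1" offset and starts the loop at 0 + 1.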
+
+ def generate_function_definitions(self, env, code):
+ self.target.generate_function_definitions(env, code)
+ self.bound1.generate_function_definitions(env, code)
+ self.bound2.generate_function_definitions(env, code)
+ if self.step is not None:
+ self.step.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.target.annotate(code)
+ self.bound1.annotate(code)
+ self.bound2.annotate(code)
+ if self.step:
+ self.step.annotate(code)
+ self.body.annotate(code)
+ if self.else_clause:
+ self.else_clause.annotate(code)
+
+
+class WithStatNode(StatNode):
+ """
+ Represents a Python with statement.
+
+ Implemented by the WithTransform as follows:
+
+ MGR = EXPR
+ EXIT = MGR.__exit__
+ VALUE = MGR.__enter__()
+ EXC = True
+ try:
+ try:
+ TARGET = VALUE # optional
+ BODY
+ except:
+ EXC = False
+ if not EXIT(*EXCINFO):
+ raise
+ finally:
+ if EXC:
+ EXIT(None, None, None)
+ MGR = EXIT = VALUE = None
+ """
+ # manager The with statement manager object
+ # target ExprNode the target lhs of the __enter__() call
+ # body StatNode
+ # enter_call ExprNode the call to the __enter__() method
+ # exit_var String the cname of the __exit__() method reference
+
+ child_attrs = ["manager", "enter_call", "target", "body"]
+
+ enter_call = None
+ target_temp = None
+
+ def analyse_declarations(self, env):
+ self.manager.analyse_declarations(env)
+ self.enter_call.analyse_declarations(env)
+ self.body.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.manager = self.manager.analyse_types(env)
+ self.enter_call = self.enter_call.analyse_types(env)
+ if self.target:
+ # set up target_temp before descending into body (which uses it)
+ from .ExprNodes import TempNode
+ self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.manager.generate_function_definitions(env, code)
+ self.enter_call.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ code.putln("/*with:*/ {")
+ self.manager.generate_evaluation_code(code)
+ self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
+ code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
+ self.exit_var,
+ self.manager.py_result(),
+ code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
+ code.error_goto_if_null(self.exit_var, self.pos),
+ ))
+ code.put_gotref(self.exit_var)
+
+ # need to free exit_var in the face of exceptions during setup
+ old_error_label = code.new_error_label()
+ intermediate_error_label = code.error_label
+
+ self.enter_call.generate_evaluation_code(code)
+ if self.target:
+ # The temp result will be cleaned up by the WithTargetAssignmentStatNode
+ # after assigning its result to the target of the 'with' statement.
+ self.target_temp.allocate(code)
+ self.enter_call.make_owned_reference(code)
+ code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
+ self.enter_call.generate_post_assignment_code(code)
+ else:
+ self.enter_call.generate_disposal_code(code)
+ self.enter_call.free_temps(code)
+
+ self.manager.generate_disposal_code(code)
+ self.manager.free_temps(code)
+
+ code.error_label = old_error_label
+ self.body.generate_execution_code(code)
+
+ if code.label_used(intermediate_error_label):
+ step_over_label = code.new_label()
+ code.put_goto(step_over_label)
+ code.put_label(intermediate_error_label)
+ code.put_decref_clear(self.exit_var, py_object_type)
+ code.put_goto(old_error_label)
+ code.put_label(step_over_label)
+
+ code.funcstate.release_temp(self.exit_var)
+ code.putln('}')
+
+
+class WithTargetAssignmentStatNode(AssignmentNode):
+ # The target assignment of the 'with' statement value (return
+ # value of the __enter__() call).
+ #
+    # This is a special-cased assignment that properly cleans up the RHS.
+ #
+ # lhs ExprNode the assignment target
+ # rhs ExprNode a (coerced) TempNode for the rhs (from WithStatNode)
+ # with_node WithStatNode the surrounding with-statement
+
+ child_attrs = ["rhs", "lhs"]
+ with_node = None
+ rhs = None
+
+ def analyse_declarations(self, env):
+ self.lhs.analyse_target_declaration(env)
+
+ def analyse_expressions(self, env):
+ self.lhs = self.lhs.analyse_target_types(env)
+ self.lhs.gil_assignment_check(env)
+ self.rhs = self.with_node.target_temp.coerce_to(self.lhs.type, env)
+ return self
+
+ def generate_execution_code(self, code):
+ self.rhs.generate_evaluation_code(code)
+ self.lhs.generate_assignment_code(self.rhs, code)
+ self.with_node.target_temp.release(code)
+
+ def annotate(self, code):
+ self.lhs.annotate(code)
+ self.rhs.annotate(code)
+
+
+class TryExceptStatNode(StatNode):
+ # try .. except statement
+ #
+ # body StatNode
+ # except_clauses [ExceptClauseNode]
+ # else_clause StatNode or None
+
+ child_attrs = ["body", "except_clauses", "else_clause"]
+ in_generator = False
+
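+    # Illustrative sketch of the generated control flow: the enclosing
+    # exception state is saved via __Pyx_ExceptionSave() before the try body
+    # runs under a private error label; except clauses handle errors raised
+    # inside, the return/break/continue/error exits restore the saved state
+    # via __Pyx_ExceptionReset(), and plain fall-through just discards it.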
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+ for except_clause in self.except_clauses:
+ except_clause.analyse_declarations(env)
+ if self.else_clause:
+ self.else_clause.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(env)
+ default_clause_seen = 0
+ for i, except_clause in enumerate(self.except_clauses):
+ except_clause = self.except_clauses[i] = except_clause.analyse_expressions(env)
+ if default_clause_seen:
+ error(except_clause.pos, "default 'except:' must be last")
+ if not except_clause.pattern:
+ default_clause_seen = 1
+ self.has_default_clause = default_clause_seen
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Try-except statement"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos) # before changing the error label, in case of tracing errors
+ code.putln("{")
+
+ old_return_label = code.return_label
+ old_break_label = code.break_label
+ old_continue_label = code.continue_label
+ old_error_label = code.new_error_label()
+ our_error_label = code.error_label
+ except_end_label = code.new_label('exception_handled')
+ except_error_label = code.new_label('except_error')
+ except_return_label = code.new_label('except_return')
+ try_return_label = code.new_label('try_return')
+ try_break_label = code.new_label('try_break') if old_break_label else None
+ try_continue_label = code.new_label('try_continue') if old_continue_label else None
+ try_end_label = code.new_label('try_end')
+
+ exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
+ for _ in range(3)]
+ save_exc = code.insertion_point()
+ code.putln(
+ "/*try:*/ {")
+ code.return_label = try_return_label
+ code.break_label = try_break_label
+ code.continue_label = try_continue_label
+ self.body.generate_execution_code(code)
+ code.mark_pos(self.pos, trace=False)
+ code.putln(
+ "}")
+ temps_to_clean_up = code.funcstate.all_free_managed_temps()
+ can_raise = code.label_used(our_error_label)
+
+ if can_raise:
+ # inject code before the try block to save away the exception state
+ code.globalstate.use_utility_code(reset_exception_utility_code)
+ if not self.in_generator:
+ save_exc.putln("__Pyx_PyThreadState_declare")
+ save_exc.putln("__Pyx_PyThreadState_assign")
+ save_exc.putln("__Pyx_ExceptionSave(%s);" % (
+ ', '.join(['&%s' % var for var in exc_save_vars])))
+ for var in exc_save_vars:
+ save_exc.put_xgotref(var)
+
+ def restore_saved_exception():
+ for name in exc_save_vars:
+ code.put_xgiveref(name)
+ code.putln("__Pyx_ExceptionReset(%s);" %
+ ', '.join(exc_save_vars))
+ else:
+ # try block cannot raise exceptions, but we had to allocate the temps above,
+ # so just keep the C compiler from complaining about them being unused
+ mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
+ save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
+
+ def restore_saved_exception():
+ pass
+
+ code.error_label = except_error_label
+ code.return_label = except_return_label
+ normal_case_terminates = self.body.is_terminator
+ if self.else_clause:
+ code.mark_pos(self.else_clause.pos)
+ code.putln(
+ "/*else:*/ {")
+ self.else_clause.generate_execution_code(code)
+ code.putln(
+ "}")
+ if not normal_case_terminates:
+ normal_case_terminates = self.else_clause.is_terminator
+
+ if can_raise:
+ if not normal_case_terminates:
+ for var in exc_save_vars:
+ code.put_xdecref_clear(var, py_object_type)
+ code.put_goto(try_end_label)
+ code.put_label(our_error_label)
+ for temp_name, temp_type in temps_to_clean_up:
+ code.put_xdecref_clear(temp_name, temp_type)
+
+ outer_except = code.funcstate.current_except
+ # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
+ code.funcstate.current_except = self
+ for except_clause in self.except_clauses:
+ except_clause.generate_handling_code(code, except_end_label)
+ code.funcstate.current_except = outer_except
+
+ if not self.has_default_clause:
+ code.put_goto(except_error_label)
+
+ for exit_label, old_label in [(except_error_label, old_error_label),
+ (try_break_label, old_break_label),
+ (try_continue_label, old_continue_label),
+ (try_return_label, old_return_label),
+ (except_return_label, old_return_label)]:
+ if code.label_used(exit_label):
+ if not normal_case_terminates and not code.label_used(try_end_label):
+ code.put_goto(try_end_label)
+ code.put_label(exit_label)
+ code.mark_pos(self.pos, trace=False)
+ if can_raise:
+ restore_saved_exception()
+ code.put_goto(old_label)
+
+ if code.label_used(except_end_label):
+ if not normal_case_terminates and not code.label_used(try_end_label):
+ code.put_goto(try_end_label)
+ code.put_label(except_end_label)
+ if can_raise:
+ restore_saved_exception()
+ if code.label_used(try_end_label):
+ code.put_label(try_end_label)
+ code.putln("}")
+
+ for cname in exc_save_vars:
+ code.funcstate.release_temp(cname)
+
+ code.return_label = old_return_label
+ code.break_label = old_break_label
+ code.continue_label = old_continue_label
+ code.error_label = old_error_label
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+ for except_clause in self.except_clauses:
+ except_clause.generate_function_definitions(env, code)
+ if self.else_clause is not None:
+ self.else_clause.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.body.annotate(code)
+ for except_node in self.except_clauses:
+ except_node.annotate(code)
+ if self.else_clause:
+ self.else_clause.annotate(code)
+
+
+class ExceptClauseNode(Node):
+ # Part of try ... except statement.
+ #
+ # pattern [ExprNode]
+ # target ExprNode or None
+ # body StatNode
+ # excinfo_target TupleNode(3*ResultRefNode) or None optional target for exception info (not owned here!)
+ # match_flag string result of exception match
+ # exc_value ExcValueNode used internally
+ # function_name string qualified name of enclosing function
+ # exc_vars (string * 3) local exception variables
+ # is_except_as bool Py3-style "except ... as xyz"
+
+ # excinfo_target is never set by the parser, but can be set by a transform
+ # in order to extract more extensive information about the exception as a
+ # sys.exc_info()-style tuple into a target variable
+
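+    # Illustrative mapping (names hypothetical): for
+    #     except (TypeError, ValueError) as e: ...
+    # pattern holds the two type expressions, target is the NameNode for "e",
+    # and is_except_as is True; a bare "except:" clause has no pattern and
+    # matches any exception.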
+ child_attrs = ["pattern", "target", "body", "exc_value"]
+
+ exc_value = None
+ excinfo_target = None
+ is_except_as = False
+
+ def analyse_declarations(self, env):
+ if self.target:
+ self.target.analyse_target_declaration(env)
+ self.body.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.function_name = env.qualified_name
+ if self.pattern:
+ # normalise/unpack self.pattern into a list
+ for i, pattern in enumerate(self.pattern):
+ pattern = pattern.analyse_expressions(env)
+ self.pattern[i] = pattern.coerce_to_pyobject(env)
+
+ if self.target:
+ from . import ExprNodes
+ self.exc_value = ExprNodes.ExcValueNode(self.pos)
+ self.target = self.target.analyse_target_expression(env, self.exc_value)
+
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_handling_code(self, code, end_label):
+ code.mark_pos(self.pos)
+
+ if self.pattern:
+ has_non_literals = not all(
+ pattern.is_literal or pattern.is_simple() and not pattern.is_temp
+ for pattern in self.pattern)
+
+ if has_non_literals:
+ # For non-trivial exception check expressions, hide the live exception from C-API calls.
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
+ code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ else:
+ exc_vars = ()
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+
+ exc_tests = []
+ for pattern in self.pattern:
+ pattern.generate_evaluation_code(code)
+ exc_tests.append(exc_test_func % pattern.py_result())
+
+ match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
+ for pattern in self.pattern:
+ pattern.generate_disposal_code(code)
+ pattern.free_temps(code)
+
+ if has_non_literals:
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
+ code.putln(
+ "if (%s) {" %
+ match_flag)
+ code.funcstate.release_temp(match_flag)
+ else:
+ code.putln("/*except:*/ {")
+
+ if (not getattr(self.body, 'stats', True)
+ and self.excinfo_target is None
+ and self.target is None):
+            # simplest case: no exception variable, empty body (pass)
+ # => reset the exception state, done
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrRestore(0,0,0);")
+ code.put_goto(end_label)
+ code.putln("}")
+ return
+
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.put_add_traceback(self.function_name)
+ # We always have to fetch the exception value even if
+ # there is no target, because this also normalises the
+ # exception and stores it in the thread state.
+ code.globalstate.use_utility_code(get_exception_utility_code)
+ exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
+ code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
+ exc_args, code.error_goto(self.pos)))
+ for var in exc_vars:
+ code.put_gotref(var)
+ if self.target:
+ self.exc_value.set_var(exc_vars[1])
+ self.exc_value.generate_evaluation_code(code)
+ self.target.generate_assignment_code(self.exc_value, code)
+ if self.excinfo_target is not None:
+ for tempvar, node in zip(exc_vars, self.excinfo_target.args):
+ node.set_var(tempvar)
+
+ old_break_label, old_continue_label = code.break_label, code.continue_label
+ code.break_label = code.new_label('except_break')
+ code.continue_label = code.new_label('except_continue')
+
+ old_exc_vars = code.funcstate.exc_vars
+ code.funcstate.exc_vars = exc_vars
+ self.body.generate_execution_code(code)
+ code.funcstate.exc_vars = old_exc_vars
+
+ if not self.body.is_terminator:
+ for var in exc_vars:
+ # FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars),
+ # but I don't think it's the right solution.
+ code.put_xdecref_clear(var, py_object_type)
+ code.put_goto(end_label)
+
+ for new_label, old_label in [(code.break_label, old_break_label),
+ (code.continue_label, old_continue_label)]:
+ if code.label_used(new_label):
+ code.put_label(new_label)
+ for var in exc_vars:
+ code.put_decref_clear(var, py_object_type)
+ code.put_goto(old_label)
+ code.break_label = old_break_label
+ code.continue_label = old_continue_label
+
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
+ code.putln(
+ "}")
+
+ def generate_function_definitions(self, env, code):
+ if self.target is not None:
+ self.target.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ if self.pattern:
+ for pattern in self.pattern:
+ pattern.annotate(code)
+ if self.target:
+ self.target.annotate(code)
+ self.body.annotate(code)
+
+
+class TryFinallyStatNode(StatNode):
+ # try ... finally statement
+ #
+ # body StatNode
+ # finally_clause StatNode
+ # finally_except_clause deep-copy of finally_clause for exception case
+    # in_generator inside of generator => must also store away the current exception in the return case
+ #
+ # Each of the continue, break, return and error gotos runs
+ # into its own deep-copy of the finally block code.
+ # In addition, if we're doing an error, we save the
+ # exception on entry to the finally block and restore
+ # it on exit.
+
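+    # Illustrative sketch: for "try: BODY finally: FIN" the generated C holds
+    # one deep-copied instance of FIN per used exit path (normal fall-through,
+    # return, break, continue, error); the error path first fetches the live
+    # exception into temps, runs FIN, then restores the exception before
+    # jumping to the outer error label.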
+ child_attrs = ["body", "finally_clause", "finally_except_clause"]
+
+ preserve_exception = 1
+
+ # handle exception case, in addition to return/break/continue
+ handle_error_case = True
+ func_return_type = None
+ finally_except_clause = None
+
+ is_try_finally_in_nogil = False
+ in_generator = False
+
+ @staticmethod
+ def create_analysed(pos, env, body, finally_clause):
+ node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
+ return node
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+ self.finally_except_clause = copy.deepcopy(self.finally_clause)
+ self.finally_except_clause.analyse_declarations(env)
+ self.finally_clause.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(env)
+ self.finally_clause = self.finally_clause.analyse_expressions(env)
+ self.finally_except_clause = self.finally_except_clause.analyse_expressions(env)
+ if env.return_type and not env.return_type.is_void:
+ self.func_return_type = env.return_type
+ return self
+
+ nogil_check = Node.gil_error
+ gil_message = "Try-finally statement"
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos) # before changing the error label, in case of tracing errors
+ code.putln("/*try:*/ {")
+
+ old_error_label = code.error_label
+ old_labels = code.all_new_labels()
+ new_labels = code.get_all_labels()
+ new_error_label = code.error_label
+ if not self.handle_error_case:
+ code.error_label = old_error_label
+ catch_label = code.new_label()
+
+ was_in_try_finally = code.funcstate.in_try_finally
+ code.funcstate.in_try_finally = 1
+
+ self.body.generate_execution_code(code)
+
+ code.funcstate.in_try_finally = was_in_try_finally
+ code.putln("}")
+
+ temps_to_clean_up = code.funcstate.all_free_managed_temps()
+ code.mark_pos(self.finally_clause.pos)
+ code.putln("/*finally:*/ {")
+
+ # Reset labels only after writing out a potential line trace call for correct nogil error handling.
+ code.set_all_labels(old_labels)
+
+ def fresh_finally_clause(_next=[self.finally_clause]):
+            # emit the original subtree on first use; afterwards, hand out
+            # deep copies of a pristine copy
+ node = _next[0]
+ node_copy = copy.deepcopy(node)
+ if node is self.finally_clause:
+ _next[0] = node_copy
+ else:
+ node = node_copy
+ return node
+
+ preserve_error = self.preserve_exception and code.label_used(new_error_label)
+ needs_success_cleanup = not self.finally_clause.is_terminator
+
+ if not self.body.is_terminator:
+ code.putln('/*normal exit:*/{')
+ fresh_finally_clause().generate_execution_code(code)
+ if not self.finally_clause.is_terminator:
+ code.put_goto(catch_label)
+ code.putln('}')
+
+ if preserve_error:
+ code.put_label(new_error_label)
+ code.putln('/*exception exit:*/{')
+ if not self.in_generator:
+ code.putln("__Pyx_PyThreadState_declare")
+ if self.is_try_finally_in_nogil:
+ code.declare_gilstate()
+ if needs_success_cleanup:
+ exc_lineno_cnames = tuple([
+ code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ for _ in range(2)])
+ exc_filename_cname = code.funcstate.allocate_temp(
+ PyrexTypes.CPtrType(PyrexTypes.c_const_type(PyrexTypes.c_char_type)),
+ manage_ref=False)
+ else:
+ exc_lineno_cnames = exc_filename_cname = None
+ exc_vars = tuple([
+ code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+ for _ in range(6)])
+ self.put_error_catcher(
+ code, temps_to_clean_up, exc_vars, exc_lineno_cnames, exc_filename_cname)
+ finally_old_labels = code.all_new_labels()
+
+ code.putln('{')
+ old_exc_vars = code.funcstate.exc_vars
+ code.funcstate.exc_vars = exc_vars[:3]
+ self.finally_except_clause.generate_execution_code(code)
+ code.funcstate.exc_vars = old_exc_vars
+ code.putln('}')
+
+ if needs_success_cleanup:
+ self.put_error_uncatcher(code, exc_vars, exc_lineno_cnames, exc_filename_cname)
+ if exc_lineno_cnames:
+ for cname in exc_lineno_cnames:
+ code.funcstate.release_temp(cname)
+ if exc_filename_cname:
+ code.funcstate.release_temp(exc_filename_cname)
+ code.put_goto(old_error_label)
+
+ for new_label, old_label in zip(code.get_all_labels(), finally_old_labels):
+ if not code.label_used(new_label):
+ continue
+ code.put_label(new_label)
+ self.put_error_cleaner(code, exc_vars)
+ code.put_goto(old_label)
+
+ for cname in exc_vars:
+ code.funcstate.release_temp(cname)
+ code.putln('}')
+
+ code.set_all_labels(old_labels)
+ return_label = code.return_label
+ exc_vars = ()
+
+ for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
+ if not code.label_used(new_label):
+ continue
+ if new_label == new_error_label and preserve_error:
+ continue # handled above
+
+ code.putln('%s: {' % new_label)
+ ret_temp = None
+ if old_label == return_label:
+ # return actually raises an (uncatchable) exception in generators that we must preserve
+ if self.in_generator:
+ exc_vars = tuple([
+ code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+ for _ in range(6)])
+ self.put_error_catcher(code, [], exc_vars)
+ if not self.finally_clause.is_terminator:
+ # store away return value for later reuse
+ if (self.func_return_type and
+ not self.is_try_finally_in_nogil and
+ not isinstance(self.finally_clause, GILExitNode)):
+ ret_temp = code.funcstate.allocate_temp(
+ self.func_return_type, manage_ref=False)
+ code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
+ if self.func_return_type.is_pyobject:
+ code.putln("%s = 0;" % Naming.retval_cname)
+
+ fresh_finally_clause().generate_execution_code(code)
+
+ if old_label == return_label:
+ if ret_temp:
+ code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
+ if self.func_return_type.is_pyobject:
+ code.putln("%s = 0;" % ret_temp)
+ code.funcstate.release_temp(ret_temp)
+ if self.in_generator:
+ self.put_error_uncatcher(code, exc_vars)
+ for cname in exc_vars:
+ code.funcstate.release_temp(cname)
+
+ if not self.finally_clause.is_terminator:
+ code.put_goto(old_label)
+ code.putln('}')
+
+ # End finally
+ code.put_label(catch_label)
+ code.putln(
+ "}")
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+ self.finally_clause.generate_function_definitions(env, code)
+ if self.finally_except_clause:
+ self.finally_except_clause.generate_function_definitions(env, code)
+
+ def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
+ exc_lineno_cnames=None, exc_filename_cname=None):
+ code.globalstate.use_utility_code(restore_exception_utility_code)
+ code.globalstate.use_utility_code(get_exception_utility_code)
+ code.globalstate.use_utility_code(swap_exception_utility_code)
+
+ if self.is_try_finally_in_nogil:
+ code.put_ensure_gil(declare_gilstate=False)
+ code.putln("__Pyx_PyThreadState_assign")
+
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp_name, type in temps_to_clean_up:
+ code.put_xdecref_clear(temp_name, type)
+
+ # not using preprocessor here to avoid warnings about
+ # unused utility functions and/or temps
+ code.putln("if (PY_MAJOR_VERSION >= 3)"
+ " __Pyx_ExceptionSwap(&%s, &%s, &%s);" % exc_vars[3:])
+ code.putln("if ((PY_MAJOR_VERSION < 3) ||"
+ # if __Pyx_GetException() fails in Py3,
+ # store the newly raised exception instead
+ " unlikely(__Pyx_GetException(&%s, &%s, &%s) < 0)) "
+ "__Pyx_ErrFetch(&%s, &%s, &%s);" % (exc_vars[:3] * 2))
+ for var in exc_vars:
+ code.put_xgotref(var)
+ if exc_lineno_cnames:
+ code.putln("%s = %s; %s = %s; %s = %s;" % (
+ exc_lineno_cnames[0], Naming.lineno_cname,
+ exc_lineno_cnames[1], Naming.clineno_cname,
+ exc_filename_cname, Naming.filename_cname))
+
+ if self.is_try_finally_in_nogil:
+ code.put_release_ensured_gil()
+
+ def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
+ code.globalstate.use_utility_code(restore_exception_utility_code)
+ code.globalstate.use_utility_code(reset_exception_utility_code)
+
+ if self.is_try_finally_in_nogil:
+ code.put_ensure_gil(declare_gilstate=False)
+
+ # not using preprocessor here to avoid warnings about
+ # unused utility functions and/or temps
+ code.putln("if (PY_MAJOR_VERSION >= 3) {")
+ for var in exc_vars[3:]:
+ code.put_xgiveref(var)
+ code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
+ code.putln("}")
+ for var in exc_vars[:3]:
+ code.put_xgiveref(var)
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % exc_vars[:3])
+
+ if self.is_try_finally_in_nogil:
+ code.put_release_ensured_gil()
+
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ if exc_lineno_cnames:
+ code.putln("%s = %s; %s = %s; %s = %s;" % (
+ Naming.lineno_cname, exc_lineno_cnames[0],
+ Naming.clineno_cname, exc_lineno_cnames[1],
+ Naming.filename_cname, exc_filename_cname))
+
+ def put_error_cleaner(self, code, exc_vars):
+ code.globalstate.use_utility_code(reset_exception_utility_code)
+ if self.is_try_finally_in_nogil:
+ code.put_ensure_gil(declare_gilstate=False)
+
+ # not using preprocessor here to avoid warnings about
+ # unused utility functions and/or temps
+ code.putln("if (PY_MAJOR_VERSION >= 3) {")
+ for var in exc_vars[3:]:
+ code.put_xgiveref(var)
+ code.putln("__Pyx_ExceptionReset(%s, %s, %s);" % exc_vars[3:])
+ code.putln("}")
+ for var in exc_vars[:3]:
+ code.put_xdecref_clear(var, py_object_type)
+ if self.is_try_finally_in_nogil:
+ code.put_release_ensured_gil()
+ code.putln(' '.join(["%s = 0;"]*3) % exc_vars[3:])
+
+ def annotate(self, code):
+ self.body.annotate(code)
+ self.finally_clause.annotate(code)
+
+
+class NogilTryFinallyStatNode(TryFinallyStatNode):
+ """
+ A try/finally statement that may be used in nogil code sections.
+ """
+
+ preserve_exception = False
+ nogil_check = None
+
+
+class GILStatNode(NogilTryFinallyStatNode):
+ # 'with gil' or 'with nogil' statement
+ #
+ # state string 'gil' or 'nogil'
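+    #
+    # Example (Cython source handled by this node; do_c_work() and
+    # do_py_work() are placeholder names):
+    #
+    #     with nogil:
+    #         do_c_work()        # body runs without the GIL
+    #         with gil:
+    #             do_py_work()   # temporarily re-acquires the GIL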
+
+ state_temp = None
+
+ def __init__(self, pos, state, body):
+ self.state = state
+ self.create_state_temp_if_needed(pos, state, body)
+ TryFinallyStatNode.__init__(
+ self, pos,
+ body=body,
+ finally_clause=GILExitNode(
+ pos, state=state, state_temp=self.state_temp))
+
+ def create_state_temp_if_needed(self, pos, state, body):
+ from .ParseTreeTransforms import YieldNodeCollector
+ collector = YieldNodeCollector()
+ collector.visitchildren(body)
+ if not collector.yields:
+ return
+
+ if state == 'gil':
+ temp_type = PyrexTypes.c_gilstate_type
+ else:
+ temp_type = PyrexTypes.c_threadstate_ptr_type
+ from . import ExprNodes
+ self.state_temp = ExprNodes.TempNode(pos, temp_type)
+
+ def analyse_declarations(self, env):
+ env._in_with_gil_block = (self.state == 'gil')
+ if self.state == 'gil':
+ env.has_with_gil_block = True
+
+ return super(GILStatNode, self).analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ env.use_utility_code(
+ UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+ was_nogil = env.nogil
+ env.nogil = self.state == 'nogil'
+ node = TryFinallyStatNode.analyse_expressions(self, env)
+ env.nogil = was_nogil
+ return node
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ code.begin_block()
+ if self.state_temp:
+ self.state_temp.allocate(code)
+ variable = self.state_temp.result()
+ else:
+ variable = None
+
+ old_gil_config = code.funcstate.gil_owned
+ if self.state == 'gil':
+ code.put_ensure_gil(variable=variable)
+ code.funcstate.gil_owned = True
+ else:
+ code.put_release_gil(variable=variable)
+ code.funcstate.gil_owned = False
+
+ TryFinallyStatNode.generate_execution_code(self, code)
+
+ if self.state_temp:
+ self.state_temp.release(code)
+
+ code.funcstate.gil_owned = old_gil_config
+ code.end_block()
+
+
+class GILExitNode(StatNode):
+ """
+ Used as the 'finally' block in a GILStatNode
+
+ state string 'gil' or 'nogil'
+ """
+
+ child_attrs = []
+ state_temp = None
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ if self.state_temp:
+ variable = self.state_temp.result()
+ else:
+ variable = None
+
+ if self.state == 'gil':
+ code.put_release_ensured_gil(variable)
+ else:
+ code.put_acquire_gil(variable)
+
+
+class EnsureGILNode(GILExitNode):
+ """
+ Ensure the GIL in nogil functions for cleanup before returning.
+ """
+
+ def generate_execution_code(self, code):
+ code.put_ensure_gil(declare_gilstate=False)
+
+
+def cython_view_utility_code():
+ from . import MemoryView
+ return MemoryView.view_utility_code
+
+
+utility_code_for_cimports = {
+ # utility code (or inlining c) in a pxd (or pyx) file.
+ # TODO: Consider a generic user-level mechanism for importing
+ 'cpython.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"),
+ 'cpython.array.array' : lambda : UtilityCode.load_cached("ArrayAPI", "arrayarray.h"),
+ 'cython.view' : cython_view_utility_code,
+}
+
+utility_code_for_imports = {
+ # utility code used when special modules are imported.
+ # TODO: Consider a generic user-level mechanism for importing
+ 'asyncio': ("__Pyx_patch_asyncio", "PatchAsyncIO", "Coroutine.c"),
+ 'inspect': ("__Pyx_patch_inspect", "PatchInspect", "Coroutine.c"),
+}
+
+
+class CImportStatNode(StatNode):
+ # cimport statement
+ #
+ # module_name string Qualified name of module being imported
+ # as_name string or None Name specified in "as" clause, if any
+ # is_absolute bool True for absolute imports, False otherwise
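+    #
+    # Note (a sketch of the behaviour implemented below): for a dotted
+    # name such as "cimport a.b.c", only the top-level name 'a' becomes
+    # visible in the importing scope (unless an "as" alias is given, in
+    # which case the alias binds the innermost module), while the
+    # intermediate submodules are declared on their parent scopes.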
+
+ child_attrs = []
+ is_absolute = False
+
+ def analyse_declarations(self, env):
+ if not env.is_module_scope:
+ error(self.pos, "cimport only allowed at module level")
+ return
+ module_scope = env.find_module(
+ self.module_name, self.pos, relative_level=0 if self.is_absolute else -1)
+ if "." in self.module_name:
+ names = [EncodedString(name) for name in self.module_name.split(".")]
+ top_name = names[0]
+ top_module_scope = env.context.find_submodule(top_name)
+ module_scope = top_module_scope
+ for name in names[1:]:
+ submodule_scope = module_scope.find_submodule(name)
+ module_scope.declare_module(name, submodule_scope, self.pos)
+ module_scope = submodule_scope
+ if self.as_name:
+ env.declare_module(self.as_name, module_scope, self.pos)
+ else:
+ env.add_imported_module(module_scope)
+ env.declare_module(top_name, top_module_scope, self.pos)
+ else:
+ name = self.as_name or self.module_name
+ env.declare_module(name, module_scope, self.pos)
+ if self.module_name in utility_code_for_cimports:
+ env.use_utility_code(utility_code_for_cimports[self.module_name]())
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class FromCImportStatNode(StatNode):
+ # from ... cimport statement
+ #
+ # module_name string Qualified name of module
+ # relative_level int or None Relative import: number of dots before module_name
+ # imported_names [(pos, name, as_name, kind)] Names to be imported
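+    #
+    # e.g. "from libc.stdlib cimport malloc as c_malloc" produces a
+    # single imported_names entry (pos, u'malloc', u'c_malloc', None);
+    # 'kind' is only set for the "from m cimport struct/union/class X"
+    # forms.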
+
+ child_attrs = []
+ module_name = None
+ relative_level = None
+ imported_names = None
+
+ def analyse_declarations(self, env):
+ if not env.is_module_scope:
+ error(self.pos, "cimport only allowed at module level")
+ return
+ if self.relative_level and self.relative_level > env.qualified_name.count('.'):
+ error(self.pos, "relative cimport beyond main package is not allowed")
+ return
+ module_scope = env.find_module(self.module_name, self.pos, relative_level=self.relative_level, need_pxd=0)
+ module_name = module_scope.qualified_name
+ env.add_imported_module(module_scope)
+ for pos, name, as_name, kind in self.imported_names:
+ if name == "*":
+ for local_name, entry in list(module_scope.entries.items()):
+ env.add_imported_entry(local_name, entry, pos)
+ else:
+ entry = module_scope.lookup(name)
+ if entry:
+ if kind and not self.declaration_matches(entry, kind):
+ entry.redeclared(pos)
+ entry.used = 1
+ else:
+ if kind == 'struct' or kind == 'union':
+ entry = module_scope.declare_struct_or_union(
+ name, kind=kind, scope=None, typedef_flag=0, pos=pos)
+ elif kind == 'class':
+ entry = module_scope.declare_c_class(name, pos=pos, module_name=module_name)
+ else:
+ submodule_scope = env.context.find_module(
+ name, relative_to=module_scope, pos=self.pos, absolute_fallback=False)
+ if submodule_scope.parent_module is module_scope:
+ env.declare_module(as_name or name, submodule_scope, self.pos)
+ else:
+ error(pos, "Name '%s' not declared in module '%s'" % (name, module_name))
+
+ if entry:
+ local_name = as_name or name
+ env.add_imported_entry(local_name, entry, pos)
+
+ if module_name.startswith('cpython') or module_name.startswith('cython'): # enough for now
+ if module_name in utility_code_for_cimports:
+ env.use_utility_code(utility_code_for_cimports[module_name]())
+ for _, name, _, _ in self.imported_names:
+ fqname = '%s.%s' % (module_name, name)
+ if fqname in utility_code_for_cimports:
+ env.use_utility_code(utility_code_for_cimports[fqname]())
+
+ def declaration_matches(self, entry, kind):
+ if not entry.is_type:
+ return 0
+ type = entry.type
+ if kind == 'class':
+ if not type.is_extension_type:
+ return 0
+ else:
+ if not type.is_struct_or_union:
+ return 0
+ if kind != type.kind:
+ return 0
+ return 1
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_execution_code(self, code):
+ pass
+
+
+class FromImportStatNode(StatNode):
+ # from ... import statement
+ #
+ # module ImportNode
+ # items [(string, NameNode)]
+ # interned_items [(string, NameNode, ExprNode)]
+ # item PyTempNode used internally
+ # import_star boolean used internally
+
+ child_attrs = ["module"]
+ import_star = 0
+
+ def analyse_declarations(self, env):
+ for name, target in self.items:
+ if name == "*":
+ if not env.is_module_scope:
+ error(self.pos, "import * only allowed at module level")
+ return
+ env.has_import_star = 1
+ self.import_star = 1
+ else:
+ target.analyse_target_declaration(env)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.module = self.module.analyse_expressions(env)
+ self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
+ self.interned_items = []
+ for name, target in self.items:
+ if name == '*':
+ for _, entry in env.entries.items():
+ if not entry.is_type and entry.type.is_extension_type:
+ env.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
+ break
+ else:
+ entry = env.lookup(target.name)
+ # check whether or not entry is already cimported
+ if (entry.is_type and entry.type.name == name
+ and hasattr(entry.type, 'module_name')):
+ if entry.type.module_name == self.module.module_name.value:
+ # cimported with absolute name
+ continue
+ try:
+ # cimported with relative name
+ module = env.find_module(self.module.module_name.value, pos=self.pos,
+ relative_level=self.module.level)
+ if entry.type.module_name == module.qualified_name:
+ continue
+ except AttributeError:
+ pass
+ target = target.analyse_target_expression(env, None) # FIXME?
+ if target.type is py_object_type:
+ coerced_item = None
+ else:
+ coerced_item = self.item.coerce_to(target.type, env)
+ self.interned_items.append((name, target, coerced_item))
+ return self
+
+ def generate_execution_code(self, code):
+ code.mark_pos(self.pos)
+ self.module.generate_evaluation_code(code)
+ if self.import_star:
+ code.putln(
+ 'if (%s(%s) < 0) %s;' % (
+ Naming.import_star,
+ self.module.py_result(),
+ code.error_goto(self.pos)))
+ item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ self.item.set_cname(item_temp)
+ if self.interned_items:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ImportFrom", "ImportExport.c"))
+ for name, target, coerced_item in self.interned_items:
+ code.putln(
+ '%s = __Pyx_ImportFrom(%s, %s); %s' % (
+ item_temp,
+ self.module.py_result(),
+ code.intern_identifier(name),
+ code.error_goto_if_null(item_temp, self.pos)))
+ code.put_gotref(item_temp)
+ if coerced_item is None:
+ target.generate_assignment_code(self.item, code)
+ else:
+ coerced_item.allocate_temp_result(code)
+ coerced_item.generate_result_code(code)
+ target.generate_assignment_code(coerced_item, code)
+ code.put_decref_clear(item_temp, py_object_type)
+ code.funcstate.release_temp(item_temp)
+ self.module.generate_disposal_code(code)
+ self.module.free_temps(code)
+
+
+class ParallelNode(Node):
+ """
+ Base class for cython.parallel constructs.
+ """
+
+ nogil_check = None
+
+
+class ParallelStatNode(StatNode, ParallelNode):
+ """
+ Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
+
+ assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
+ assignments to variables in this parallel section
+
+ parent parent ParallelStatNode or None
+ is_parallel indicates whether this node is OpenMP parallel
+ (true for #pragma omp parallel for and
+ #pragma omp parallel)
+
+ is_parallel is true for:
+
+ #pragma omp parallel
+ #pragma omp parallel for
+
+ sections, but NOT for
+
+ #pragma omp for
+
+ We need this to determine the sharing attributes.
+
+ privatization_insertion_point a code insertion point used to make temps
+ private (esp. the "nsteps" temp)
+
+ args tuple the arguments passed to the parallel construct
+ kwargs DictNode the keyword arguments passed to the parallel
+ construct (replaced by its compile time value)
+ """
+
+ child_attrs = ['body', 'num_threads']
+
+ body = None
+
+ is_prange = False
+ is_nested_prange = False
+
+ error_label_used = False
+
+ num_threads = None
+ chunksize = None
+
+ parallel_exc = (
+ Naming.parallel_exc_type,
+ Naming.parallel_exc_value,
+ Naming.parallel_exc_tb,
+ )
+
+ parallel_pos_info = (
+ Naming.parallel_filename,
+ Naming.parallel_lineno,
+ Naming.parallel_clineno,
+ )
+
+ pos_info = (
+ Naming.filename_cname,
+ Naming.lineno_cname,
+ Naming.clineno_cname,
+ )
+
+ critical_section_counter = 0
+
+ def __init__(self, pos, **kwargs):
+ super(ParallelStatNode, self).__init__(pos, **kwargs)
+
+ # All assignments in this scope
+ self.assignments = kwargs.get('assignments') or {}
+
+ # All seen closure cnames and their temporary cnames
+ self.seen_closure_vars = set()
+
+ # Dict of variables that should be declared (first|last|)private or
+ # reduction { Entry: (op, lastprivate) }.
+ # If op is not None, it's a reduction.
+ self.privates = {}
+
+ # [NameNode]
+ self.assigned_nodes = []
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+
+ self.num_threads = None
+
+ if self.kwargs:
+ # Try to find num_threads and chunksize keyword arguments
+ pairs = []
+ seen = set()
+ for dictitem in self.kwargs.key_value_pairs:
+ if dictitem.key.value in seen:
+ error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
+ seen.add(dictitem.key.value)
+ if dictitem.key.value == 'num_threads':
+ if not dictitem.value.is_none:
+ self.num_threads = dictitem.value
+ elif self.is_prange and dictitem.key.value == 'chunksize':
+ if not dictitem.value.is_none:
+ self.chunksize = dictitem.value
+ else:
+ pairs.append(dictitem)
+
+ self.kwargs.key_value_pairs = pairs
+
+ try:
+ self.kwargs = self.kwargs.compile_time_value(env)
+ except Exception as e:
+ error(self.kwargs.pos, "Only compile-time values may be "
+ "supplied as keyword arguments")
+ else:
+ self.kwargs = {}
+
+ for kw, val in self.kwargs.items():
+ if kw not in self.valid_keyword_arguments:
+ error(self.pos, "Invalid keyword argument: %s" % kw)
+ else:
+ setattr(self, kw, val)
+
+ def analyse_expressions(self, env):
+ if self.num_threads:
+ self.num_threads = self.num_threads.analyse_expressions(env)
+
+ if self.chunksize:
+ self.chunksize = self.chunksize.analyse_expressions(env)
+
+ self.body = self.body.analyse_expressions(env)
+ self.analyse_sharing_attributes(env)
+
+ if self.num_threads is not None:
+ if self.parent and self.parent.num_threads is not None and not self.parent.is_prange:
+ error(self.pos, "num_threads already declared in outer section")
+ elif self.parent and not self.parent.is_prange:
+ error(self.pos, "num_threads must be declared in the parent parallel section")
+ elif (self.num_threads.type.is_int and
+ self.num_threads.is_literal and
+ self.num_threads.compile_time_value(env) <= 0):
+ error(self.pos, "argument to num_threads must be greater than 0")
+
+ if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
+ self.num_threads = self.num_threads.coerce_to(
+ PyrexTypes.c_int_type, env).coerce_to_temp(env)
+ return self
+
+ def analyse_sharing_attributes(self, env):
+ """
+ Analyse the privates for this block and set them in self.privates.
+ This should be called in a post-order fashion during the
+ analyse_expressions phase
+ """
+ for entry, (pos, op) in self.assignments.items():
+
+ if self.is_prange and not self.is_parallel:
+ # closely nested prange in a with parallel block, disallow
+ # assigning to privates in the with parallel block (we
+ # consider it too implicit and magicky for users)
+ if entry in self.parent.assignments:
+ error(pos, "Cannot assign to private of outer parallel block")
+ continue
+
+ if not self.is_prange and op:
+                # Again possible, but considered too magicky
+ error(pos, "Reductions not allowed for parallel blocks")
+ continue
+
+ # By default all variables should have the same values as if
+ # executed sequentially
+ lastprivate = True
+ self.propagate_var_privatization(entry, pos, op, lastprivate)
+
+ def propagate_var_privatization(self, entry, pos, op, lastprivate):
+ """
+ Propagate the sharing attributes of a variable. If the privatization is
+        determined by a parent scope, don't propagate further.
+
+ If we are a prange, we propagate our sharing attributes outwards to
+ other pranges. If we are a prange in parallel block and the parallel
+ block does not determine the variable private, we propagate to the
+ parent of the parent. Recursion stops at parallel blocks, as they have
+ no concept of lastprivate or reduction.
+
+ So the following cases propagate:
+
+ sum is a reduction for all loops:
+
+ for i in prange(n):
+ for j in prange(n):
+ for k in prange(n):
+ sum += i * j * k
+
+ sum is a reduction for both loops, local_var is private to the
+ parallel with block:
+
+ for i in prange(n):
+ with parallel:
+ local_var = ... # private to the parallel
+ for j in prange(n):
+ sum += i * j
+
+ Nested with parallel blocks are disallowed, because they wouldn't
+ allow you to propagate lastprivates or reductions:
+
+ #pragma omp parallel for lastprivate(i)
+ for i in prange(n):
+
+ sum = 0
+
+ #pragma omp parallel private(j, sum)
+ with parallel:
+
+ #pragma omp parallel
+ with parallel:
+
+ #pragma omp for lastprivate(j) reduction(+:sum)
+ for j in prange(n):
+ sum += i
+
+ # sum and j are well-defined here
+
+ # sum and j are undefined here
+
+ # sum and j are undefined here
+ """
+ self.privates[entry] = (op, lastprivate)
+
+ if entry.type.is_memoryviewslice:
+ error(pos, "Memoryview slices can only be shared in parallel sections")
+ return
+
+ if self.is_prange:
+ if not self.is_parallel and entry not in self.parent.assignments:
+ # Parent is a parallel with block
+ parent = self.parent.parent
+ else:
+ parent = self.parent
+
+ # We don't need to propagate privates, only reductions and
+ # lastprivates
+ if parent and (op or lastprivate):
+ parent.propagate_var_privatization(entry, pos, op, lastprivate)
+
+ def _allocate_closure_temp(self, code, entry):
+ """
+        Helper function that allocates a temporary for a closure variable that
+ is assigned to.
+ """
+ if self.parent:
+ return self.parent._allocate_closure_temp(code, entry)
+
+ if entry.cname in self.seen_closure_vars:
+ return entry.cname
+
+ cname = code.funcstate.allocate_temp(entry.type, True)
+
+ # Add both the actual cname and the temp cname, as the actual cname
+ # will be replaced with the temp cname on the entry
+ self.seen_closure_vars.add(entry.cname)
+ self.seen_closure_vars.add(cname)
+
+ self.modified_entries.append((entry, entry.cname))
+ code.putln("%s = %s;" % (cname, entry.cname))
+ entry.cname = cname
+
+ def initialize_privates_to_nan(self, code, exclude=None):
+ first = True
+
+ for entry, (op, lastprivate) in sorted(self.privates.items()):
+ if not op and (not exclude or entry != exclude):
+ invalid_value = entry.type.invalid_value()
+
+ if invalid_value:
+ if first:
+ code.putln("/* Initialize private variables to "
+ "invalid values */")
+ first = False
+ code.putln("%s = %s;" % (entry.cname,
+ entry.type.cast_code(invalid_value)))
+
+ def evaluate_before_block(self, code, expr):
+ c = self.begin_of_parallel_control_block_point_after_decls
+        # we need to set the owner to ourselves temporarily, as
+        # allocate_temp may otherwise generate a comment in the middle of
+        # our pragma when DebugFlags.debug_temp_code_comments is in effect
+ owner = c.funcstate.owner
+ c.funcstate.owner = c
+ expr.generate_evaluation_code(c)
+ c.funcstate.owner = owner
+
+ return expr.result()
+
+ def put_num_threads(self, code):
+ """
+ Write self.num_threads if set as the num_threads OpenMP directive
+ """
+ if self.num_threads is not None:
+ code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads))
+
+ def declare_closure_privates(self, code):
+ """
+        If a variable is in a scope object, we need to allocate a temp,
+        copy the value into it for the duration of the parallel section,
+        and assign the value from the temp back to the variable in the
+        scope object afterwards. This kind of copying should be done only
+        in the outermost parallel section.
+ """
+ self.modified_entries = []
+
+ for entry in sorted(self.assignments):
+ if entry.from_closure or entry.in_closure:
+ self._allocate_closure_temp(code, entry)
+
+ def release_closure_privates(self, code):
+ """
+ Release any temps used for variables in scope objects. As this is the
+ outermost parallel block, we don't need to delete the cnames from
+ self.seen_closure_vars.
+ """
+ for entry, original_cname in self.modified_entries:
+ code.putln("%s = %s;" % (original_cname, entry.cname))
+ code.funcstate.release_temp(entry.cname)
+ entry.cname = original_cname
+
+ def privatize_temps(self, code, exclude_temps=()):
+ """
+        Make any used temporaries private. code.start_collecting_temps()
+        should have been called before the relevant code block.
+ """
+ c = self.privatization_insertion_point
+ self.privatization_insertion_point = None
+
+ if self.is_parallel:
+ self.temps = temps = code.funcstate.stop_collecting_temps()
+ privates, firstprivates = [], []
+ for temp, type in sorted(temps):
+ if type.is_pyobject or type.is_memoryviewslice:
+ firstprivates.append(temp)
+ else:
+ privates.append(temp)
+
+ if privates:
+ c.put(" private(%s)" % ", ".join(privates))
+ if firstprivates:
+ c.put(" firstprivate(%s)" % ", ".join(firstprivates))
+
+ if self.breaking_label_used:
+ shared_vars = [Naming.parallel_why]
+ if self.error_label_used:
+ shared_vars.extend(self.parallel_exc)
+ c.put(" private(%s, %s, %s)" % self.pos_info)
+
+ c.put(" shared(%s)" % ', '.join(shared_vars))
+
+ def cleanup_temps(self, code):
+ # Now clean up any memoryview slice and object temporaries
+ if self.is_parallel and not self.is_nested_prange:
+ code.putln("/* Clean up any temporaries */")
+ for temp, type in sorted(self.temps):
+ if type.is_memoryviewslice:
+ code.put_xdecref_memoryviewslice(temp, have_gil=False)
+ elif type.is_pyobject:
+ code.put_xdecref(temp, type)
+ code.putln("%s = NULL;" % temp)
+
+ def setup_parallel_control_flow_block(self, code):
+ """
+ Sets up a block that surrounds the parallel block to determine
+        how the parallel section was exited. Any kind of exit (break,
+        continue, return, exceptions) is trapped. This is the idea:
+
+ {
+ int why = 0;
+
+ #pragma omp parallel
+ {
+ return # -> goto new_return_label;
+ goto end_parallel;
+
+ new_return_label:
+ why = 3;
+ goto end_parallel;
+
+ end_parallel:;
+ #pragma omp flush(why) # we need to flush for every iteration
+ }
+
+ if (why == 3)
+ goto old_return_label;
+ }
+ """
+ self.old_loop_labels = code.new_loop_labels()
+ self.old_error_label = code.new_error_label()
+ self.old_return_label = code.return_label
+ code.return_label = code.new_label(name="return")
+
+ code.begin_block() # parallel control flow block
+ self.begin_of_parallel_control_block_point = code.insertion_point()
+ self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
+
+ self.undef_builtin_expect_apple_gcc_bug(code)
+
+ def begin_parallel_block(self, code):
+ """
+ Each OpenMP thread in a parallel section that contains a with gil block
+ must have the thread-state initialized. The call to
+        PyGILState_Release() then deallocates our threadstate. If we didn't
+        do this, each with gil block would allocate and deallocate one of
+        its own, losing any exception information before it can be saved on
+        leaving the parallel section.
+ """
+ self.begin_of_parallel_block = code.insertion_point()
+
+ def end_parallel_block(self, code):
+ """
+ To ensure all OpenMP threads have thread states, we ensure the GIL
+ in each thread (which creates a thread state if it doesn't exist),
+ after which we release the GIL.
+ On exit, reacquire the GIL and release the thread state.
+
+ If compiled without OpenMP support (at the C level), then we still have
+ to acquire the GIL to decref any object temporaries.
+ """
+ begin_code = self.begin_of_parallel_block
+ self.begin_of_parallel_block = None
+
+ if self.error_label_used:
+ end_code = code
+
+ begin_code.putln("#ifdef _OPENMP")
+ begin_code.put_ensure_gil(declare_gilstate=True)
+ begin_code.putln("Py_BEGIN_ALLOW_THREADS")
+ begin_code.putln("#endif /* _OPENMP */")
+
+ end_code.putln("#ifdef _OPENMP")
+ end_code.putln("Py_END_ALLOW_THREADS")
+ end_code.putln("#else")
+ end_code.put_safe("{\n")
+ end_code.put_ensure_gil()
+ end_code.putln("#endif /* _OPENMP */")
+ self.cleanup_temps(end_code)
+ end_code.put_release_ensured_gil()
+ end_code.putln("#ifndef _OPENMP")
+ end_code.put_safe("}\n")
+ end_code.putln("#endif /* _OPENMP */")
+
+ def trap_parallel_exit(self, code, should_flush=False):
+ """
+        Trap any kind of exit from a parallel construct. 'should_flush'
+        indicates whether the 'why' variable should be flushed, which is needed by
+ prange to skip the loop. It also indicates whether we need to register
+ a continue (we need this for parallel blocks, but not for prange
+ loops, as it is a direct jump there).
+
+ It uses the same mechanism as try/finally:
+ 1 continue
+ 2 break
+ 3 return
+ 4 error
+ """
+ save_lastprivates_label = code.new_label()
+ dont_return_label = code.new_label()
+
+ self.any_label_used = False
+ self.breaking_label_used = False
+ self.error_label_used = False
+
+ self.parallel_private_temps = []
+
+ all_labels = code.get_all_labels()
+
+ # Figure this out before starting to generate any code
+ for label in all_labels:
+ if code.label_used(label):
+ self.breaking_label_used = (self.breaking_label_used or
+ label != code.continue_label)
+ self.any_label_used = True
+
+ if self.any_label_used:
+ code.put_goto(dont_return_label)
+
+ for i, label in enumerate(all_labels):
+ if not code.label_used(label):
+ continue
+
+ is_continue_label = label == code.continue_label
+
+ code.put_label(label)
+
+ if not (should_flush and is_continue_label):
+ if label == code.error_label:
+ self.error_label_used = True
+ self.fetch_parallel_exception(code)
+
+ code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
+
+ if (self.breaking_label_used and self.is_prange and not
+ is_continue_label):
+ code.put_goto(save_lastprivates_label)
+ else:
+ code.put_goto(dont_return_label)
+
+ if self.any_label_used:
+ if self.is_prange and self.breaking_label_used:
+ # Don't rely on lastprivate, save our lastprivates
+ code.put_label(save_lastprivates_label)
+ self.save_parallel_vars(code)
+
+ code.put_label(dont_return_label)
+
+ if should_flush and self.breaking_label_used:
+ code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
+
+ def save_parallel_vars(self, code):
+ """
+ The following shenanigans are instated when we break, return or
+ propagate errors from a prange. In this case we cannot rely on
+ lastprivate() to do its job, as no iterations may have executed yet
+ in the last thread, leaving the values undefined. It is most likely
+ that the breaking thread has well-defined values of the lastprivate
+ variables, so we keep those values.
+ """
+ section_name = "__pyx_parallel_lastprivates%d" % self.critical_section_counter
+ code.putln_openmp("#pragma omp critical(%s)" % section_name)
+ ParallelStatNode.critical_section_counter += 1
+
+ code.begin_block() # begin critical section
+
+ c = self.begin_of_parallel_control_block_point
+
+ temp_count = 0
+ for entry, (op, lastprivate) in sorted(self.privates.items()):
+ if not lastprivate or entry.type.is_pyobject:
+ continue
+
+ type_decl = entry.type.empty_declaration_code()
+ temp_cname = "__pyx_parallel_temp%d" % temp_count
+ private_cname = entry.cname
+
+ temp_count += 1
+
+ invalid_value = entry.type.invalid_value()
+ if invalid_value:
+ init = ' = ' + entry.type.cast_code(invalid_value)
+ else:
+ init = ''
+ # Declare the parallel private in the outer block
+ c.putln("%s %s%s;" % (type_decl, temp_cname, init))
+
+ # Initialize before escaping
+ code.putln("%s = %s;" % (temp_cname, private_cname))
+
+ self.parallel_private_temps.append((temp_cname, private_cname))
+
+ code.end_block() # end critical section
+
+ def fetch_parallel_exception(self, code):
+ """
+ As each OpenMP thread may raise an exception, we need to fetch that
+ exception from the threadstate and save it for after the parallel
+ section where it can be re-raised in the master thread.
+
+ Although it would seem that __pyx_filename, __pyx_lineno and
+ __pyx_clineno are only assigned to under exception conditions (i.e.,
+ when we have the GIL), and thus should be allowed to be shared without
+        any race condition, they are in fact subject to the same race
+        conditions as before, when they were global variables and
+        functions were allowed to release the GIL:
+
+ thread A thread B
+ acquire
+ set lineno
+ release
+ acquire
+ set lineno
+ release
+ acquire
+ fetch exception
+ release
+ skip the fetch
+
+ deallocate threadstate deallocate threadstate
+ """
+ code.begin_block()
+ code.put_ensure_gil(declare_gilstate=True)
+
+ code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
+ code.putln(
+ "if (!%s) {" % Naming.parallel_exc_type)
+
+ code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc)
+ pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
+ code.funcstate.uses_error_indicator = True
+ code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
+ code.put_gotref(Naming.parallel_exc_type)
+
+ code.putln(
+ "}")
+
+ code.put_release_ensured_gil()
+ code.end_block()
+
+ def restore_parallel_exception(self, code):
+ "Re-raise a parallel exception"
+ code.begin_block()
+ code.put_ensure_gil(declare_gilstate=True)
+
+ code.put_giveref(Naming.parallel_exc_type)
+ code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc)
+ pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
+ code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
+
+ code.put_release_ensured_gil()
+ code.end_block()
+
+ def restore_labels(self, code):
+ """
+        Restore all old labels. Call this before the 'else' clause of for
+        loops and always before ending the parallel control flow block.
+ """
+ code.set_all_labels(self.old_loop_labels + (self.old_return_label,
+ self.old_error_label))
+
+ def end_parallel_control_flow_block(
+ self, code, break_=False, continue_=False, return_=False):
+ """
+ This ends the parallel control flow block and based on how the parallel
+ section was exited, takes the corresponding action. The break_ and
+ continue_ parameters indicate whether these should be propagated
+ outwards:
+
+ for i in prange(...):
+ with cython.parallel.parallel():
+ continue
+
+ Here break should be trapped in the parallel block, and propagated to
+ the for loop.
+ """
+ c = self.begin_of_parallel_control_block_point
+ self.begin_of_parallel_control_block_point = None
+ self.begin_of_parallel_control_block_point_after_decls = None
+
+ if self.num_threads is not None:
+ # FIXME: is it the right place? should not normally produce code.
+ self.num_threads.generate_disposal_code(code)
+ self.num_threads.free_temps(code)
+
+ # Firstly, always prefer errors over returning, continue or break
+ if self.error_label_used:
+ c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info)
+ c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc)
+
+ code.putln(
+ "if (%s) {" % Naming.parallel_exc_type)
+ code.putln("/* This may have been overridden by a continue, "
+ "break or return in another thread. Prefer the error. */")
+ code.putln("%s = 4;" % Naming.parallel_why)
+ code.putln(
+ "}")
+
+ if continue_:
+ any_label_used = self.any_label_used
+ else:
+ any_label_used = self.breaking_label_used
+
+ if any_label_used:
+ # __pyx_parallel_why is used, declare and initialize
+ c.putln("int %s;" % Naming.parallel_why)
+ c.putln("%s = 0;" % Naming.parallel_why)
+
+ code.putln(
+ "if (%s) {" % Naming.parallel_why)
+
+ for temp_cname, private_cname in self.parallel_private_temps:
+ code.putln("%s = %s;" % (private_cname, temp_cname))
+
+ code.putln("switch (%s) {" % Naming.parallel_why)
+ if continue_:
+ code.put(" case 1: ")
+ code.put_goto(code.continue_label)
+
+ if break_:
+ code.put(" case 2: ")
+ code.put_goto(code.break_label)
+
+ if return_:
+ code.put(" case 3: ")
+ code.put_goto(code.return_label)
+
+ if self.error_label_used:
+ code.globalstate.use_utility_code(restore_exception_utility_code)
+ code.putln(" case 4:")
+ self.restore_parallel_exception(code)
+ code.put_goto(code.error_label)
+
+ code.putln("}") # end switch
+ code.putln(
+ "}") # end if
+
+ code.end_block() # end parallel control flow block
+ self.redef_builtin_expect_apple_gcc_bug(code)
+
+ # FIXME: improve with version number for OS X Lion
+ buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
+ have_expect_condition = "(defined(__GNUC__) && " \
+ "(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
+ redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
+
+ def undef_builtin_expect_apple_gcc_bug(self, code):
+ """
+ A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
+ """
+ if not self.parent:
+ code.undef_builtin_expect(self.redef_condition)
+
+ def redef_builtin_expect_apple_gcc_bug(self, code):
+ if not self.parent:
+ code.redef_builtin_expect(self.redef_condition)
+
+
+class ParallelWithBlockNode(ParallelStatNode):
+ """
+ This node represents a 'with cython.parallel.parallel():' block
+ """
+
+ valid_keyword_arguments = ['num_threads']
+
+ num_threads = None
+
+ def analyse_declarations(self, env):
+ super(ParallelWithBlockNode, self).analyse_declarations(env)
+ if self.args:
+ error(self.pos, "cython.parallel.parallel() does not take "
+ "positional arguments")
+
+ def generate_execution_code(self, code):
+ self.declare_closure_privates(code)
+ self.setup_parallel_control_flow_block(code)
+
+ code.putln("#ifdef _OPENMP")
+ code.put("#pragma omp parallel ")
+
+ if self.privates:
+ privates = [e.cname for e in self.privates
+ if not e.type.is_pyobject]
+ code.put('private(%s)' % ', '.join(sorted(privates)))
+
+ self.privatization_insertion_point = code.insertion_point()
+ self.put_num_threads(code)
+ code.putln("")
+
+ code.putln("#endif /* _OPENMP */")
+
+ code.begin_block() # parallel block
+ self.begin_parallel_block(code)
+ self.initialize_privates_to_nan(code)
+ code.funcstate.start_collecting_temps()
+ self.body.generate_execution_code(code)
+ self.trap_parallel_exit(code)
+ self.privatize_temps(code)
+ self.end_parallel_block(code)
+ code.end_block() # end parallel block
+
+ continue_ = code.label_used(code.continue_label)
+ break_ = code.label_used(code.break_label)
+ return_ = code.label_used(code.return_label)
+
+ self.restore_labels(code)
+ self.end_parallel_control_flow_block(code, break_=break_,
+ continue_=continue_,
+ return_=return_)
+ self.release_closure_privates(code)
+
+
+class ParallelRangeNode(ParallelStatNode):
+ """
+ This node represents a 'for i in cython.parallel.prange():' construct.
+
+ target NameNode the target iteration variable
+ else_clause Node or None the else clause of this loop
+ """
+
+ child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
+ 'chunksize']
+
+ body = target = else_clause = args = None
+
+ start = stop = step = None
+
+ is_prange = True
+
+ nogil = None
+ schedule = None
+
+ valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
+
+ def __init__(self, pos, **kwds):
+ super(ParallelRangeNode, self).__init__(pos, **kwds)
+ # Pretend to be a ForInStatNode for control flow analysis
+ self.iterator = PassStatNode(pos)
+
+ def analyse_declarations(self, env):
+ super(ParallelRangeNode, self).analyse_declarations(env)
+ self.target.analyse_target_declaration(env)
+ if self.else_clause is not None:
+ self.else_clause.analyse_declarations(env)
+
+ if not self.args or len(self.args) > 3:
+ error(self.pos, "Invalid number of positional arguments to prange")
+ return
+
+ if len(self.args) == 1:
+ self.stop, = self.args
+ elif len(self.args) == 2:
+ self.start, self.stop = self.args
+ else:
+ self.start, self.stop, self.step = self.args
+
+ if hasattr(self.schedule, 'decode'):
+ self.schedule = self.schedule.decode('ascii')
+
+ if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'):
+ error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,))
+
+ def analyse_expressions(self, env):
+ was_nogil = env.nogil
+ if self.nogil:
+ env.nogil = True
+
+ if self.target is None:
+ error(self.pos, "prange() can only be used as part of a for loop")
+ return self
+
+ self.target = self.target.analyse_target_types(env)
+
+ if not self.target.type.is_numeric:
+ # Not a valid type, assume one for now anyway
+
+ if not self.target.type.is_pyobject:
+ # nogil_check will catch the is_pyobject case
+ error(self.target.pos,
+ "Must be of numeric type, not %s" % self.target.type)
+
+ self.index_type = PyrexTypes.c_py_ssize_t_type
+ else:
+ self.index_type = self.target.type
+
+ # Setup start, stop and step, allocating temps if needed
+ self.names = 'start', 'stop', 'step'
+ start_stop_step = self.start, self.stop, self.step
+
+ for node, name in zip(start_stop_step, self.names):
+ if node is not None:
+ node.analyse_types(env)
+ if not node.type.is_numeric:
+ error(node.pos, "%s argument must be numeric" % name)
+ continue
+
+ if not node.is_literal:
+ node = node.coerce_to_temp(env)
+ setattr(self, name, node)
+
+ # As we range from 0 to nsteps, computing the index along the
+ # way, we need a fitting type for 'i' and 'nsteps'
+ self.index_type = PyrexTypes.widest_numeric_type(
+ self.index_type, node.type)
+
+ if self.else_clause is not None:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+
+ # Although not actually an assignment in this scope, it should be
+ # treated as such to ensure it is unpacked if a closure temp, and to
+ # ensure lastprivate behaviour and propagation. If the target index is
+ # not a NameNode, it won't have an entry, and an error was issued by
+ # ParallelRangeTransform
+ if hasattr(self.target, 'entry'):
+ self.assignments[self.target.entry] = self.target.pos, None
+
+ node = super(ParallelRangeNode, self).analyse_expressions(env)
+
+ if node.chunksize:
+ if not node.schedule:
+ error(node.chunksize.pos,
+ "Must provide schedule with chunksize")
+ elif node.schedule == 'runtime':
+ error(node.chunksize.pos,
+ "Chunksize not valid for the schedule runtime")
+ elif (node.chunksize.type.is_int and
+ node.chunksize.is_literal and
+ node.chunksize.compile_time_value(env) <= 0):
+                error(node.chunksize.pos, "Chunksize must be positive")
+
+ node.chunksize = node.chunksize.coerce_to(
+ PyrexTypes.c_int_type, env).coerce_to_temp(env)
+
+ if node.nogil:
+ env.nogil = was_nogil
+
+ node.is_nested_prange = node.parent and node.parent.is_prange
+ if node.is_nested_prange:
+ parent = node
+ while parent.parent and parent.parent.is_prange:
+ parent = parent.parent
+
+ parent.assignments.update(node.assignments)
+ parent.privates.update(node.privates)
+ parent.assigned_nodes.extend(node.assigned_nodes)
+ return node
+
+ def nogil_check(self, env):
+ names = 'start', 'stop', 'step', 'target'
+ nodes = self.start, self.stop, self.step, self.target
+ for name, node in zip(names, nodes):
+ if node is not None and node.type.is_pyobject:
+ error(node.pos, "%s may not be a Python object "
+ "as we don't have the GIL" % name)
+
+ def generate_execution_code(self, code):
+ """
+ Generate code in the following steps
+
+ 1) copy any closure variables determined thread-private
+ into temporaries
+
+ 2) allocate temps for start, stop and step
+
+ 3) generate a loop that calculates the total number of steps,
+ which then computes the target iteration variable for every step:
+
+ for i in prange(start, stop, step):
+ ...
+
+ becomes
+
+ nsteps = (stop - start) / step;
+ i = start;
+
+ #pragma omp parallel for lastprivate(i)
+ for (temp = 0; temp < nsteps; temp++) {
+ i = start + step * temp;
+ ...
+ }
+
+ Note that accumulation of 'i' would have a data dependency
+ between iterations.
+
+ Also, you can't do this
+
+ for (i = start; i < stop; i += step)
+ ...
+
+ as the '<' operator should become '>' for descending loops.
+ 'for i from x < i < y:' does not suffer from this problem
+ as the relational operator is known at compile time!
+
+ 4) release our temps and write back any private closure variables
+ """
+ self.declare_closure_privates(code)
+
+ # This can only be a NameNode
+ target_index_cname = self.target.entry.cname
+
+ # This will be used as the dict to format our code strings, holding
+        # the start, stop, step, temps and target cnames
+ fmt_dict = {
+ 'target': target_index_cname,
+ 'target_type': self.target.type.empty_declaration_code()
+ }
+
+ # Setup start, stop and step, allocating temps if needed
+ start_stop_step = self.start, self.stop, self.step
+ defaults = '0', '0', '1'
+ for node, name, default in zip(start_stop_step, self.names, defaults):
+ if node is None:
+ result = default
+ elif node.is_literal:
+ result = node.get_constant_c_result_code()
+ else:
+ node.generate_evaluation_code(code)
+ result = node.result()
+
+ fmt_dict[name] = result
+
+ fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
+ fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
+
+ # TODO: check if the step is 0 and if so, raise an exception in a
+ # 'with gil' block. For now, just abort
+ code.putln("if ((%(step)s == 0)) abort();" % fmt_dict)
+
+ self.setup_parallel_control_flow_block(code) # parallel control flow block
+
+ # Note: nsteps is private in an outer scope if present
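+        # The formula below computes nsteps = ceil((stop - start) / step)
+        # in C integer arithmetic: "step - step/abs(step)" adds |step| - 1
+        # with the sign of the step, rounding the step count upwards.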
+ code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict)
+
+ # The target iteration variable might not be initialized, do it only if
+ # we are executing at least 1 iteration, otherwise we should leave the
+ # target unaffected. The target iteration variable is firstprivate to
+ # shut up compiler warnings caused by lastprivate, as the compiler
+ # erroneously believes that nsteps may be <= 0, leaving the private
+ # target index uninitialized
+ code.putln("if (%(nsteps)s > 0)" % fmt_dict)
+ code.begin_block() # if block
+ self.generate_loop(code, fmt_dict)
+ code.end_block() # end if block
+
+ self.restore_labels(code)
+
+ if self.else_clause:
+ if self.breaking_label_used:
+ code.put("if (%s < 2)" % Naming.parallel_why)
+
+ code.begin_block() # else block
+ code.putln("/* else */")
+ self.else_clause.generate_execution_code(code)
+ code.end_block() # end else block
+
+ # ------ cleanup ------
+ self.end_parallel_control_flow_block(code) # end parallel control flow block
+
+ # And finally, release our privates and write back any closure
+ # variables
+ for temp in start_stop_step + (self.chunksize,):
+ if temp is not None:
+ temp.generate_disposal_code(code)
+ temp.free_temps(code)
+
+ code.funcstate.release_temp(fmt_dict['i'])
+ code.funcstate.release_temp(fmt_dict['nsteps'])
+
+ self.release_closure_privates(code)
+
+ def generate_loop(self, code, fmt_dict):
+ if self.is_nested_prange:
+ code.putln("#if 0")
+ else:
+ code.putln("#ifdef _OPENMP")
+
+ if not self.is_parallel:
+ code.put("#pragma omp for")
+ self.privatization_insertion_point = code.insertion_point()
+ reduction_codepoint = self.parent.privatization_insertion_point
+ else:
+ code.put("#pragma omp parallel")
+ self.privatization_insertion_point = code.insertion_point()
+ reduction_codepoint = self.privatization_insertion_point
+ code.putln("")
+ code.putln("#endif /* _OPENMP */")
+
+ code.begin_block() # pragma omp parallel begin block
+
+ # Initialize the GIL if needed for this thread
+ self.begin_parallel_block(code)
+
+ if self.is_nested_prange:
+ code.putln("#if 0")
+ else:
+ code.putln("#ifdef _OPENMP")
+ code.put("#pragma omp for")
+
+ for entry, (op, lastprivate) in sorted(self.privates.items()):
+ # Don't declare the index variable as a reduction
+ if op and op in "+*-&^|" and entry != self.target.entry:
+ if entry.type.is_pyobject:
+ error(self.pos, "Python objects cannot be reductions")
+ else:
+ #code.put(" reduction(%s:%s)" % (op, entry.cname))
+ # This is the only way reductions + nesting works in gcc4.5
+ reduction_codepoint.put(
+ " reduction(%s:%s)" % (op, entry.cname))
+ else:
+ if entry == self.target.entry:
+ code.put(" firstprivate(%s)" % entry.cname)
+ code.put(" lastprivate(%s)" % entry.cname)
+ continue
+
+ if not entry.type.is_pyobject:
+ if lastprivate:
+ private = 'lastprivate'
+ else:
+ private = 'private'
+
+ code.put(" %s(%s)" % (private, entry.cname))
+
+ if self.schedule:
+ if self.chunksize:
+ chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize)
+ else:
+ chunksize = ""
+
+ code.put(" schedule(%s%s)" % (self.schedule, chunksize))
+
+ self.put_num_threads(reduction_codepoint)
+
+ code.putln("")
+ code.putln("#endif /* _OPENMP */")
+
+ code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
+ code.begin_block() # for loop block
+
+ guard_around_body_codepoint = code.insertion_point()
+
+ # Start if guard block around the body. This may be unnecessary, but
+ # at least it doesn't spoil indentation
+ code.begin_block()
+
+ code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict)
+ self.initialize_privates_to_nan(code, exclude=self.target.entry)
+
+ if self.is_parallel and not self.is_nested_prange:
+ # nested pranges are not omp'ified, temps go to outer loops
+ code.funcstate.start_collecting_temps()
+
+ self.body.generate_execution_code(code)
+ self.trap_parallel_exit(code, should_flush=True)
+ if self.is_parallel and not self.is_nested_prange:
+ # nested pranges are not omp'ified, temps go to outer loops
+ self.privatize_temps(code)
+
+ if self.breaking_label_used:
+ # Put a guard around the loop body in case return, break or
+ # exceptions might be used
+ guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
+
+ code.end_block() # end guard around loop body
+ code.end_block() # end for loop block
+
+ if self.is_parallel:
+ # Release the GIL and deallocate the thread state
+ self.end_parallel_block(code)
+ code.end_block() # pragma omp parallel end block
+
+
+class CnameDecoratorNode(StatNode):
+ """
+ This node is for the cname decorator in CythonUtilityCode:
+
+ @cname('the_cname')
+ cdef func(...):
+ ...
+
+ In case of a cdef class the cname specifies the objstruct_cname.
+
+ node the node to which the cname decorator is applied
+ cname the cname the node should get
+ """
+
+ child_attrs = ['node']
+
+ def analyse_declarations(self, env):
+ self.node.analyse_declarations(env)
+
+ node = self.node
+ if isinstance(node, CompilerDirectivesNode):
+ node = node.body.stats[0]
+
+ self.is_function = isinstance(node, FuncDefNode)
+ is_struct_or_enum = isinstance(node, (CStructOrUnionDefNode, CEnumDefNode))
+ e = node.entry
+
+ if self.is_function:
+ e.cname = self.cname
+ e.func_cname = self.cname
+ e.used = True
+ if e.pyfunc_cname and '.' in e.pyfunc_cname:
+ e.pyfunc_cname = self.mangle(e.pyfunc_cname)
+ elif is_struct_or_enum:
+ e.cname = e.type.cname = self.cname
+ else:
+ scope = node.scope
+
+ e.cname = self.cname
+ e.type.objstruct_cname = self.cname + '_obj'
+ e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
+ e.type.typeptr_cname = self.cname + '_type'
+ e.type.scope.namespace_cname = e.type.typeptr_cname
+
+ e.as_variable.cname = e.type.typeptr_cname
+
+ scope.scope_prefix = self.cname + "_"
+
+ for name, entry in scope.entries.items():
+ if entry.func_cname:
+ entry.func_cname = self.mangle(entry.cname)
+ if entry.pyfunc_cname:
+ entry.pyfunc_cname = self.mangle(entry.pyfunc_cname)
+
+ def mangle(self, cname):
+ if '.' in cname:
+ # remove __pyx_base from func_cname
+ cname = cname.split('.')[-1]
+ return '%s_%s' % (self.cname, cname)
+
+ def analyse_expressions(self, env):
+ self.node = self.node.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ "Ensure a prototype for every @cname method in the right place"
+ if self.is_function and env.is_c_class_scope:
+ # method in cdef class, generate a prototype in the header
+ h_code = code.globalstate['utility_code_proto']
+
+ if isinstance(self.node, DefNode):
+ self.node.generate_function_header(
+ h_code, with_pymethdef=False, proto_only=True)
+ else:
+ from . import ModuleNode
+ entry = self.node.entry
+ cname = entry.cname
+ entry.cname = entry.func_cname
+
+ ModuleNode.generate_cfunction_declaration(
+ entry,
+ env.global_scope(),
+ h_code,
+ definition=True)
+
+ entry.cname = cname
+
+ self.node.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ self.node.generate_execution_code(code)
+
+
+#------------------------------------------------------------------------------------
+#
+# Runtime support code
+#
+#------------------------------------------------------------------------------------
+
+if Options.gcc_branch_hints:
+ branch_prediction_macros = """
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) \
+ && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+"""
+else:
+ branch_prediction_macros = """
+#define likely(x) (x)
+#define unlikely(x) (x)
+"""
+
+#------------------------------------------------------------------------------------
+
+printing_utility_code = UtilityCode.load_cached("Print", "Printing.c")
+printing_one_utility_code = UtilityCode.load_cached("PrintOne", "Printing.c")
+
+#------------------------------------------------------------------------------------
+
+# Exception raising code
+#
+# Exceptions are raised by __Pyx_Raise() and stored as plain
+# type/value/tb in PyThreadState->curexc_*. When being caught by an
+# 'except' statement, curexc_* is moved over to exc_* by
+# __Pyx_GetException()
+
+restore_exception_utility_code = UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c")
+raise_utility_code = UtilityCode.load_cached("RaiseException", "Exceptions.c")
+get_exception_utility_code = UtilityCode.load_cached("GetException", "Exceptions.c")
+swap_exception_utility_code = UtilityCode.load_cached("SwapException", "Exceptions.c")
+reset_exception_utility_code = UtilityCode.load_cached("SaveResetException", "Exceptions.c")
+traceback_utility_code = UtilityCode.load_cached("AddTraceback", "Exceptions.c")
+
+#------------------------------------------------------------------------------------
+
+get_exception_tuple_utility_code = UtilityCode(
+ proto="""
+static PyObject *__Pyx_GetExceptionTuple(PyThreadState *__pyx_tstate); /*proto*/
+""",
+ # I doubt that calling __Pyx_GetException() here is correct as it moves
+ # the exception from tstate->curexc_* to tstate->exc_*, which prevents
+ # exception handlers later on from receiving it.
+ # NOTE: "__pyx_tstate" may be used by __Pyx_GetException() macro
+ impl = """
+static PyObject *__Pyx_GetExceptionTuple(CYTHON_UNUSED PyThreadState *__pyx_tstate) {
+ PyObject *type = NULL, *value = NULL, *tb = NULL;
+ if (__Pyx_GetException(&type, &value, &tb) == 0) {
+ PyObject* exc_info = PyTuple_New(3);
+ if (exc_info) {
+ Py_INCREF(type);
+ Py_INCREF(value);
+ Py_INCREF(tb);
+ PyTuple_SET_ITEM(exc_info, 0, type);
+ PyTuple_SET_ITEM(exc_info, 1, value);
+ PyTuple_SET_ITEM(exc_info, 2, tb);
+ return exc_info;
+ }
+ }
+ return NULL;
+}
+""",
+ requires=[get_exception_utility_code])
diff --git a/contrib/tools/cython/Cython/Compiler/Optimize.py b/contrib/tools/cython/Cython/Compiler/Optimize.py
new file mode 100644
index 0000000000..7e9435ba0a
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Optimize.py
@@ -0,0 +1,4857 @@
+from __future__ import absolute_import
+
+import re
+import sys
+import copy
+import codecs
+import itertools
+
+from . import TypeSlots
+from .ExprNodes import not_a_constant
+import cython
+cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
+ Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
+ UtilNodes=object, _py_int_types=object)
+
+if sys.version_info[0] >= 3:
+ _py_int_types = int
+ _py_string_types = (bytes, str)
+else:
+ _py_int_types = (int, long)
+ _py_string_types = (bytes, unicode)
+
+from . import Nodes
+from . import ExprNodes
+from . import PyrexTypes
+from . import Visitor
+from . import Builtin
+from . import UtilNodes
+from . import Options
+
+from .Code import UtilityCode, TempitaUtilityCode
+from .StringEncoding import EncodedString, bytes_literal, encoded_string
+from .Errors import error, warning
+from .ParseTreeTransforms import SkipDeclarations
+
+try:
+ from __builtin__ import reduce
+except ImportError:
+ from functools import reduce
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ basestring = str # Python 3
+
+
+def load_c_utility(name):
+ return UtilityCode.load_cached(name, "Optimize.c")
+
+
+def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
+ if isinstance(node, coercion_nodes):
+ return node.arg
+ return node
+
+
+def unwrap_node(node):
+ while isinstance(node, UtilNodes.ResultRefNode):
+ node = node.expression
+ return node
+
+
+def is_common_value(a, b):
+ a = unwrap_node(a)
+ b = unwrap_node(b)
+ if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
+ return a.name == b.name
+ if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
+ return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute
+ return False
+
+
+def filter_none_node(node):
+ if node is not None and node.constant_result is None:
+ return None
+ return node
+
+
+class _YieldNodeCollector(Visitor.TreeVisitor):
+ """
+ YieldExprNode finder for generator expressions.
+ """
+ def __init__(self):
+ Visitor.TreeVisitor.__init__(self)
+ self.yield_stat_nodes = {}
+ self.yield_nodes = []
+
+ visit_Node = Visitor.TreeVisitor.visitchildren
+
+ def visit_YieldExprNode(self, node):
+ self.yield_nodes.append(node)
+ self.visitchildren(node)
+
+ def visit_ExprStatNode(self, node):
+ self.visitchildren(node)
+ if node.expr in self.yield_nodes:
+ self.yield_stat_nodes[node.expr] = node
+
+ # everything below these nodes is out of scope:
+
+ def visit_GeneratorExpressionNode(self, node):
+ pass
+
+ def visit_LambdaNode(self, node):
+ pass
+
+ def visit_FuncDefNode(self, node):
+ pass
+
+
+def _find_single_yield_expression(node):
+ yield_statements = _find_yield_statements(node)
+ if len(yield_statements) != 1:
+ return None, None
+ return yield_statements[0]
+
+
+def _find_yield_statements(node):
+ collector = _YieldNodeCollector()
+ collector.visitchildren(node)
+ try:
+ yield_statements = [
+ (yield_node.arg, collector.yield_stat_nodes[yield_node])
+ for yield_node in collector.yield_nodes
+ ]
+ except KeyError:
+ # found YieldExprNode without ExprStatNode (i.e. a non-statement usage of 'yield')
+ yield_statements = []
+ return yield_statements
+
+
+class IterationTransform(Visitor.EnvTransform):
+ """Transform some common for-in loop patterns into efficient C loops:
+
+ - for-in-dict loop becomes a while loop calling PyDict_Next()
+ - for-in-enumerate is replaced by an external counter variable
+ - for-in-range loop becomes a plain C for loop
+ """
+ def visit_PrimaryCmpNode(self, node):
+ if node.is_ptr_contains():
+
+ # for t in operand2:
+ # if operand1 == t:
+ # res = True
+ # break
+ # else:
+ # res = False
+
+ pos = node.pos
+ result_ref = UtilNodes.ResultRefNode(node)
+ if node.operand2.is_subscript:
+ base_type = node.operand2.base.type.base_type
+ else:
+ base_type = node.operand2.type.base_type
+ target_handle = UtilNodes.TempHandle(base_type)
+ target = target_handle.ref(pos)
+ cmp_node = ExprNodes.PrimaryCmpNode(
+ pos, operator=u'==', operand1=node.operand1, operand2=target)
+ if_body = Nodes.StatListNode(
+ pos,
+ stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
+ Nodes.BreakStatNode(pos)])
+ if_node = Nodes.IfStatNode(
+ pos,
+ if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
+ else_clause=None)
+ for_loop = UtilNodes.TempsBlockNode(
+ pos,
+ temps = [target_handle],
+ body = Nodes.ForInStatNode(
+ pos,
+ target=target,
+ iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
+ body=if_node,
+ else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
+ for_loop = for_loop.analyse_expressions(self.current_env())
+ for_loop = self.visit(for_loop)
+ new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
+
+ if node.operator == 'not_in':
+ new_node = ExprNodes.NotNode(pos, operand=new_node)
+ return new_node
+
+ else:
+ self.visitchildren(node)
+ return node
+
+ def visit_ForInStatNode(self, node):
+ self.visitchildren(node)
+ return self._optimise_for_loop(node, node.iterator.sequence)
+
+ def _optimise_for_loop(self, node, iterable, reversed=False):
+ annotation_type = None
+ if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
+ annotation = iterable.entry.annotation
+ if annotation.is_subscript:
+ annotation = annotation.base # container base type
+ # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
+ if annotation.is_name:
+ if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
+ annotation_type = Builtin.dict_type
+ elif annotation.name == 'Dict':
+ annotation_type = Builtin.dict_type
+ if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
+ annotation_type = Builtin.set_type
+ elif annotation.name in ('Set', 'FrozenSet'):
+ annotation_type = Builtin.set_type
+
+ if Builtin.dict_type in (iterable.type, annotation_type):
+ # like iterating over dict.keys()
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_dict_iteration(
+ node, dict_obj=iterable, method=None, keys=True, values=False)
+
+ if (Builtin.set_type in (iterable.type, annotation_type) or
+ Builtin.frozenset_type in (iterable.type, annotation_type)):
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_set_iteration(node, iterable)
+
+ # C array (slice) iteration?
+ if iterable.type.is_ptr or iterable.type.is_array:
+ return self._transform_carray_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.bytes_type:
+ return self._transform_bytes_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.unicode_type:
+ return self._transform_unicode_iteration(node, iterable, reversed=reversed)
+
+ # the rest is based on function calls
+ if not isinstance(iterable, ExprNodes.SimpleCallNode):
+ return node
+
+ if iterable.args is None:
+ arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
+ else:
+ arg_count = len(iterable.args)
+ if arg_count and iterable.self is not None:
+ arg_count -= 1
+
+ function = iterable.function
+ # dict iteration?
+ if function.is_attribute and not reversed and not arg_count:
+ base_obj = iterable.self or function.obj
+ method = function.attribute
+ # in Py3, items() is equivalent to Py2's iteritems()
+ is_safe_iter = self.global_scope().context.language_level >= 3
+
+ if not is_safe_iter and method in ('keys', 'values', 'items'):
+ # try to reduce this to the corresponding .iter*() methods
+ if isinstance(base_obj, ExprNodes.CallNode):
+ inner_function = base_obj.function
+ if (inner_function.is_name and inner_function.name == 'dict'
+ and inner_function.entry
+ and inner_function.entry.is_builtin):
+ # e.g. dict(something).items() => safe to use .iter*()
+ is_safe_iter = True
+
+ keys = values = False
+ if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
+ keys = True
+ elif method == 'itervalues' or (is_safe_iter and method == 'values'):
+ values = True
+ elif method == 'iteritems' or (is_safe_iter and method == 'items'):
+ keys = values = True
+
+ if keys or values:
+ return self._transform_dict_iteration(
+ node, base_obj, method, keys, values)
+
+ # enumerate/reversed ?
+ if iterable.self is None and function.is_name and \
+ function.entry and function.entry.is_builtin:
+ if function.name == 'enumerate':
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_enumerate_iteration(node, iterable)
+ elif function.name == 'reversed':
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_reversed_iteration(node, iterable)
+
+ # range() iteration?
+ if Options.convert_range and 1 <= arg_count <= 3 and (
+ iterable.self is None and
+ function.is_name and function.name in ('range', 'xrange') and
+ function.entry and function.entry.is_builtin):
+ if node.target.type.is_int or node.target.type.is_enum:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
+ if node.target.type.is_pyobject:
+ # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
+ for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
+ if isinstance(arg, ExprNodes.IntNode):
+ if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
+ continue
+ break
+ else:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
+
+ return node
+
+ def _transform_reversed_iteration(self, node, reversed_function):
+ args = reversed_function.arg_tuple.args
+ if len(args) == 0:
+ error(reversed_function.pos,
+ "reversed() requires an iterable argument")
+ return node
+ elif len(args) > 1:
+ error(reversed_function.pos,
+ "reversed() takes exactly 1 argument")
+ return node
+ arg = args[0]
+
+ # reversed(list/tuple) ?
+ if arg.type in (Builtin.tuple_type, Builtin.list_type):
+ node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
+ node.iterator.reversed = True
+ return node
+
+ return self._optimise_for_loop(node, arg, reversed=True)
+
+ PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_char_ptr_type, [
+ PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
+ ])
+
+ PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ssize_t_type, [
+ PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
+ ])
+
+ def _transform_bytes_iteration(self, node, slice_node, reversed=False):
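+        # Sketch (illustrative): iterate over the internal char* buffer
+        # instead of the bytes object, roughly
+        #     for c in b: ...  =>  s = PyBytes_AS_STRING(b)
+        #                          for c in s[:PyBytes_GET_SIZE(b)]: ...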
+ target_type = node.target.type
+ if not target_type.is_int and target_type is not Builtin.bytes_type:
+ # bytes iteration returns bytes objects in Py2, but
+ # integers in Py3
+ return node
+
+ unpack_temp_node = UtilNodes.LetRefNode(
+ slice_node.as_none_safe_node("'NoneType' is not iterable"))
+
+ slice_base_node = ExprNodes.PythonCapiCallNode(
+ slice_node.pos, "PyBytes_AS_STRING",
+ self.PyBytes_AS_STRING_func_type,
+ args = [unpack_temp_node],
+ is_temp = 0,
+ )
+ len_node = ExprNodes.PythonCapiCallNode(
+ slice_node.pos, "PyBytes_GET_SIZE",
+ self.PyBytes_GET_SIZE_func_type,
+ args = [unpack_temp_node],
+ is_temp = 0,
+ )
+
+ return UtilNodes.LetNode(
+ unpack_temp_node,
+ self._transform_carray_iteration(
+ node,
+ ExprNodes.SliceIndexNode(
+ slice_node.pos,
+ base = slice_base_node,
+ start = None,
+ step = None,
+ stop = len_node,
+ type = slice_base_node.type,
+ is_temp = 1,
+ ),
+ reversed = reversed))
+
+ PyUnicode_READ_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ucs4_type, [
+ PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
+ ])
+
+ init_unicode_iteration_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_int_type, [
+ PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
+ ],
+ exception_value = '-1')
+
+ def _transform_unicode_iteration(self, node, slice_node, reversed=False):
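+        # Sketch (illustrative): unpack kind/data/length once via
+        # __Pyx_init_unicode_iteration(), then read characters by index:
+        #     for ch in u: ...  =>  for i from 0 <= i < length:
+        #                               ch = __Pyx_PyUnicode_READ(kind, data, i); ...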
+ if slice_node.is_literal:
+ # try to reduce to byte iteration for plain Latin-1 strings
+ try:
+ bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1')
+ except UnicodeEncodeError:
+ pass
+ else:
+ bytes_slice = ExprNodes.SliceIndexNode(
+ slice_node.pos,
+ base=ExprNodes.BytesNode(
+ slice_node.pos, value=bytes_value,
+ constant_result=bytes_value,
+ type=PyrexTypes.c_const_char_ptr_type).coerce_to(
+ PyrexTypes.c_const_uchar_ptr_type, self.current_env()),
+ start=None,
+ stop=ExprNodes.IntNode(
+ slice_node.pos, value=str(len(bytes_value)),
+ constant_result=len(bytes_value),
+ type=PyrexTypes.c_py_ssize_t_type),
+ type=Builtin.unicode_type, # hint for Python conversion
+ )
+ return self._transform_carray_iteration(node, bytes_slice, reversed)
+
+ unpack_temp_node = UtilNodes.LetRefNode(
+ slice_node.as_none_safe_node("'NoneType' is not iterable"))
+
+ start_node = ExprNodes.IntNode(
+ node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
+ length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ end_node = length_temp.ref(node.pos)
+ if reversed:
+ relation1, relation2 = '>', '>='
+ start_node, end_node = end_node, start_node
+ else:
+ relation1, relation2 = '<=', '<'
+
+ kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
+ counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+
+ target_value = ExprNodes.PythonCapiCallNode(
+ slice_node.pos, "__Pyx_PyUnicode_READ",
+ self.PyUnicode_READ_func_type,
+ args = [kind_temp.ref(slice_node.pos),
+ data_temp.ref(slice_node.pos),
+ counter_temp.ref(node.target.pos)],
+ is_temp = False,
+ )
+ if target_value.type != node.target.type:
+ target_value = target_value.coerce_to(node.target.type,
+ self.current_env())
+ target_assign = Nodes.SingleAssignmentNode(
+ pos = node.target.pos,
+ lhs = node.target,
+ rhs = target_value)
+ body = Nodes.StatListNode(
+ node.pos,
+ stats = [target_assign, node.body])
+
+ loop_node = Nodes.ForFromStatNode(
+ node.pos,
+ bound1=start_node, relation1=relation1,
+ target=counter_temp.ref(node.target.pos),
+ relation2=relation2, bound2=end_node,
+ step=None, body=body,
+ else_clause=node.else_clause,
+ from_range=True)
+
+ setup_node = Nodes.ExprStatNode(
+ node.pos,
+ expr = ExprNodes.PythonCapiCallNode(
+ slice_node.pos, "__Pyx_init_unicode_iteration",
+ self.init_unicode_iteration_func_type,
+ args = [unpack_temp_node,
+ ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
+ type=PyrexTypes.c_py_ssize_t_ptr_type),
+ ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
+ type=PyrexTypes.c_void_ptr_ptr_type),
+ ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
+ type=PyrexTypes.c_int_ptr_type),
+ ],
+ is_temp = True,
+ result_is_used = False,
+ utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
+ ))
+ return UtilNodes.LetNode(
+ unpack_temp_node,
+ UtilNodes.TempsBlockNode(
+ node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
+ body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
+
+ def _transform_carray_iteration(self, node, slice_node, reversed=False):
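+        # Sketch (illustrative): replace indexed iteration by a moving pointer,
+        # roughly
+        #     for x in arr[a:b]: ...  =>  for p from (arr + a) <= p < (arr + b):
+        #                                     x = p[0]; ...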
+ neg_step = False
+ if isinstance(slice_node, ExprNodes.SliceIndexNode):
+ slice_base = slice_node.base
+ start = filter_none_node(slice_node.start)
+ stop = filter_none_node(slice_node.stop)
+ step = None
+ if not stop:
+ if not slice_base.type.is_pyobject:
+ error(slice_node.pos, "C array iteration requires known end index")
+ return node
+
+ elif slice_node.is_subscript:
+ assert isinstance(slice_node.index, ExprNodes.SliceNode)
+ slice_base = slice_node.base
+ index = slice_node.index
+ start = filter_none_node(index.start)
+ stop = filter_none_node(index.stop)
+ step = filter_none_node(index.step)
+ if step:
+ if not isinstance(step.constant_result, _py_int_types) \
+ or step.constant_result == 0 \
+ or step.constant_result > 0 and not stop \
+ or step.constant_result < 0 and not start:
+ if not slice_base.type.is_pyobject:
+ error(step.pos, "C array iteration requires known step size and end index")
+ return node
+ else:
+ # step sign is handled internally by ForFromStatNode
+ step_value = step.constant_result
+ if reversed:
+ step_value = -step_value
+ neg_step = step_value < 0
+ step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
+ value=str(abs(step_value)),
+ constant_result=abs(step_value))
+
+ elif slice_node.type.is_array:
+ if slice_node.type.size is None:
+ error(slice_node.pos, "C array iteration requires known end index")
+ return node
+ slice_base = slice_node
+ start = None
+ stop = ExprNodes.IntNode(
+ slice_node.pos, value=str(slice_node.type.size),
+ type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
+ step = None
+
+ else:
+ if not slice_node.type.is_pyobject:
+ error(slice_node.pos, "C array iteration requires known end index")
+ return node
+
+ if start:
+ start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ if stop:
+ stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ if stop is None:
+ if neg_step:
+ stop = ExprNodes.IntNode(
+ slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
+ else:
+ error(slice_node.pos, "C array iteration requires known step size and end index")
+ return node
+
+ if reversed:
+ if not start:
+ start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
+ type=PyrexTypes.c_py_ssize_t_type)
+ # if step was provided, it was already negated above
+ start, stop = stop, start
+
+ ptr_type = slice_base.type
+ if ptr_type.is_array:
+ ptr_type = ptr_type.element_ptr_type()
+ carray_ptr = slice_base.coerce_to_simple(self.current_env())
+
+ if start and start.constant_result != 0:
+ start_ptr_node = ExprNodes.AddNode(
+ start.pos,
+ operand1=carray_ptr,
+ operator='+',
+ operand2=start,
+ type=ptr_type)
+ else:
+ start_ptr_node = carray_ptr
+
+ if stop and stop.constant_result != 0:
+ stop_ptr_node = ExprNodes.AddNode(
+ stop.pos,
+ operand1=ExprNodes.CloneNode(carray_ptr),
+ operator='+',
+ operand2=stop,
+ type=ptr_type
+ ).coerce_to_simple(self.current_env())
+ else:
+ stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
+
+ counter = UtilNodes.TempHandle(ptr_type)
+ counter_temp = counter.ref(node.target.pos)
+
+ if slice_base.type.is_string and node.target.type.is_pyobject:
+ # special case: char* -> bytes/unicode
+ if slice_node.type is Builtin.unicode_type:
+ target_value = ExprNodes.CastNode(
+ ExprNodes.DereferenceNode(
+ node.target.pos, operand=counter_temp,
+ type=ptr_type.base_type),
+ PyrexTypes.c_py_ucs4_type).coerce_to(
+ node.target.type, self.current_env())
+ else:
+ # char* -> bytes coercion requires slicing, not indexing
+ target_value = ExprNodes.SliceIndexNode(
+ node.target.pos,
+ start=ExprNodes.IntNode(node.target.pos, value='0',
+ constant_result=0,
+ type=PyrexTypes.c_int_type),
+ stop=ExprNodes.IntNode(node.target.pos, value='1',
+ constant_result=1,
+ type=PyrexTypes.c_int_type),
+ base=counter_temp,
+ type=Builtin.bytes_type,
+ is_temp=1)
+ elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
+ # Allow iteration with pointer target to avoid copy.
+ target_value = counter_temp
+ else:
+ # TODO: can this safely be replaced with DereferenceNode() as above?
+ target_value = ExprNodes.IndexNode(
+ node.target.pos,
+ index=ExprNodes.IntNode(node.target.pos, value='0',
+ constant_result=0,
+ type=PyrexTypes.c_int_type),
+ base=counter_temp,
+ type=ptr_type.base_type)
+
+ if target_value.type != node.target.type:
+ target_value = target_value.coerce_to(node.target.type,
+ self.current_env())
+
+ target_assign = Nodes.SingleAssignmentNode(
+ pos = node.target.pos,
+ lhs = node.target,
+ rhs = target_value)
+
+ body = Nodes.StatListNode(
+ node.pos,
+ stats = [target_assign, node.body])
+
+ relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
+
+ for_node = Nodes.ForFromStatNode(
+ node.pos,
+ bound1=start_ptr_node, relation1=relation1,
+ target=counter_temp,
+ relation2=relation2, bound2=stop_ptr_node,
+ step=step, body=body,
+ else_clause=node.else_clause,
+ from_range=True)
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=[counter],
+ body=for_node)
+
+ def _transform_enumerate_iteration(self, node, enumerate_function):
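+        # Sketch (illustrative): replace the enumerate() call by a manually
+        # incremented counter, roughly
+        #     for i, x in enumerate(it, start): ...
+        # =>  tmp = start
+        #     for x in it:
+        #         i = tmp; tmp = tmp + 1; ...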
+ args = enumerate_function.arg_tuple.args
+ if len(args) == 0:
+ error(enumerate_function.pos,
+ "enumerate() requires an iterable argument")
+ return node
+ elif len(args) > 2:
+ error(enumerate_function.pos,
+ "enumerate() takes at most 2 arguments")
+ return node
+
+ if not node.target.is_sequence_constructor:
+ # leave this untouched for now
+ return node
+ targets = node.target.args
+ if len(targets) != 2:
+ # leave this untouched for now
+ return node
+
+ enumerate_target, iterable_target = targets
+ counter_type = enumerate_target.type
+
+ if not counter_type.is_pyobject and not counter_type.is_int:
+            # the counter must be a C integer or a Python object => nothing we can do
+ return node
+
+ if len(args) == 2:
+ start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
+ else:
+ start = ExprNodes.IntNode(enumerate_function.pos,
+ value='0',
+ type=counter_type,
+ constant_result=0)
+ temp = UtilNodes.LetRefNode(start)
+
+ inc_expression = ExprNodes.AddNode(
+ enumerate_function.pos,
+ operand1 = temp,
+ operand2 = ExprNodes.IntNode(node.pos, value='1',
+ type=counter_type,
+ constant_result=1),
+ operator = '+',
+ type = counter_type,
+ #inplace = True, # not worth using in-place operation for Py ints
+ is_temp = counter_type.is_pyobject
+ )
+
+ loop_body = [
+ Nodes.SingleAssignmentNode(
+ pos = enumerate_target.pos,
+ lhs = enumerate_target,
+ rhs = temp),
+ Nodes.SingleAssignmentNode(
+ pos = enumerate_target.pos,
+ lhs = temp,
+ rhs = inc_expression)
+ ]
+
+ if isinstance(node.body, Nodes.StatListNode):
+ node.body.stats = loop_body + node.body.stats
+ else:
+ loop_body.append(node.body)
+ node.body = Nodes.StatListNode(
+ node.body.pos,
+ stats = loop_body)
+
+ node.target = iterable_target
+ node.item = node.item.coerce_to(iterable_target.type, self.current_env())
+ node.iterator.sequence = args[0]
+
+ # recurse into loop to check for further optimisations
+ return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
+
+ def _find_for_from_node_relations(self, neg_step_value, reversed):
+ if reversed:
+ if neg_step_value:
+ return '<', '<='
+ else:
+ return '>', '>='
+ else:
+ if neg_step_value:
+ return '>=', '>'
+ else:
+ return '<=', '<'
+
+ def _transform_range_iteration(self, node, range_function, reversed=False):
+ args = range_function.arg_tuple.args
+ if len(args) < 3:
+ step_pos = range_function.pos
+ step_value = 1
+ step = ExprNodes.IntNode(step_pos, value='1', constant_result=1)
+ else:
+ step = args[2]
+ step_pos = step.pos
+ if not isinstance(step.constant_result, _py_int_types):
+ # cannot determine step direction
+ return node
+ step_value = step.constant_result
+ if step_value == 0:
+ # will lead to an error elsewhere
+ return node
+ step = ExprNodes.IntNode(step_pos, value=str(step_value),
+ constant_result=step_value)
+
+ if len(args) == 1:
+ bound1 = ExprNodes.IntNode(range_function.pos, value='0',
+ constant_result=0)
+ bound2 = args[0].coerce_to_integer(self.current_env())
+ else:
+ bound1 = args[0].coerce_to_integer(self.current_env())
+ bound2 = args[1].coerce_to_integer(self.current_env())
+
+ relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
+
+ bound2_ref_node = None
+ if reversed:
+ bound1, bound2 = bound2, bound1
+ abs_step = abs(step_value)
+ if abs_step != 1:
+ if (isinstance(bound1.constant_result, _py_int_types) and
+ isinstance(bound2.constant_result, _py_int_types)):
+ # calculate final bounds now
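+                    # Worked example (illustrative): for reversed(range(0, 11, 3)),
+                    # the bounds were swapped above to bound1=11, bound2=0, and the
+                    # adjustment yields 0 + 3 * ((11 - 0 - 1) // 3) + 1 == 10, so
+                    # the generated loop counts 9, 6, 3, 0, exactly the values of
+                    # the original range.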
+ if step_value < 0:
+ begin_value = bound2.constant_result
+ end_value = bound1.constant_result
+ bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
+ else:
+ begin_value = bound1.constant_result
+ end_value = bound2.constant_result
+ bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1
+
+ bound1 = ExprNodes.IntNode(
+ bound1.pos, value=str(bound1_value), constant_result=bound1_value,
+ type=PyrexTypes.spanning_type(bound1.type, bound2.type))
+ else:
+ # evaluate the same expression as above at runtime
+ bound2_ref_node = UtilNodes.LetRefNode(bound2)
+ bound1 = self._build_range_step_calculation(
+ bound1, bound2_ref_node, step, step_value)
+
+ if step_value < 0:
+ step_value = -step_value
+ step.value = str(step_value)
+ step.constant_result = step_value
+ step = step.coerce_to_integer(self.current_env())
+
+ if not bound2.is_literal:
+ # stop bound must be immutable => keep it in a temp var
+ bound2_is_temp = True
+ bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
+ else:
+ bound2_is_temp = False
+
+ for_node = Nodes.ForFromStatNode(
+ node.pos,
+ target=node.target,
+ bound1=bound1, relation1=relation1,
+ relation2=relation2, bound2=bound2,
+ step=step, body=node.body,
+ else_clause=node.else_clause,
+ from_range=True)
+ for_node.set_up_loop(self.current_env())
+
+ if bound2_is_temp:
+ for_node = UtilNodes.LetNode(bound2, for_node)
+
+ return for_node
+
+ def _build_range_step_calculation(self, bound1, bound2_ref_node, step, step_value):
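+        # Builds, for runtime evaluation, the same end-bound adjustment as the
+        # constant-bounds case above (sketch):
+        #     bound2 +/- abs_step * ((begin - end - 1) // abs_step) +/- 1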
+ abs_step = abs(step_value)
+ spanning_type = PyrexTypes.spanning_type(bound1.type, bound2_ref_node.type)
+ if step.type.is_int and abs_step < 0x7FFF:
+ # Avoid loss of integer precision warnings.
+ spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type)
+ else:
+ spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
+ if step_value < 0:
+ begin_value = bound2_ref_node
+ end_value = bound1
+ final_op = '-'
+ else:
+ begin_value = bound1
+ end_value = bound2_ref_node
+ final_op = '+'
+
+ step_calculation_node = ExprNodes.binop_node(
+ bound1.pos,
+ operand1=ExprNodes.binop_node(
+ bound1.pos,
+ operand1=bound2_ref_node,
+ operator=final_op, # +/-
+ operand2=ExprNodes.MulNode(
+ bound1.pos,
+ operand1=ExprNodes.IntNode(
+ bound1.pos,
+ value=str(abs_step),
+ constant_result=abs_step,
+ type=spanning_step_type),
+ operator='*',
+ operand2=ExprNodes.DivNode(
+ bound1.pos,
+ operand1=ExprNodes.SubNode(
+ bound1.pos,
+ operand1=ExprNodes.SubNode(
+ bound1.pos,
+ operand1=begin_value,
+ operator='-',
+ operand2=end_value,
+ type=spanning_type),
+ operator='-',
+ operand2=ExprNodes.IntNode(
+ bound1.pos,
+ value='1',
+ constant_result=1),
+ type=spanning_step_type),
+ operator='//',
+ operand2=ExprNodes.IntNode(
+ bound1.pos,
+ value=str(abs_step),
+ constant_result=abs_step,
+ type=spanning_step_type),
+ type=spanning_step_type),
+ type=spanning_step_type),
+ type=spanning_step_type),
+ operator=final_op, # +/-
+ operand2=ExprNodes.IntNode(
+ bound1.pos,
+ value='1',
+ constant_result=1),
+ type=spanning_type)
+ return step_calculation_node
+
+ def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
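+        # Sketch of the generated code (illustrative):
+        #     pos = 0
+        #     tmp = __Pyx_dict_iterator(d, ...)   # also records len(d)
+        #     while 1:
+        #         <fetch next key/value at pos, or break>
+        #         <loop body>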
+ temps = []
+ temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ temps.append(temp)
+ dict_temp = temp.ref(dict_obj.pos)
+ temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(temp)
+ pos_temp = temp.ref(node.pos)
+
+ key_target = value_target = tuple_target = None
+ if keys and values:
+ if node.target.is_sequence_constructor:
+ if len(node.target.args) == 2:
+ key_target, value_target = node.target.args
+ else:
+ # unusual case that may or may not lead to an error
+ return node
+ else:
+ tuple_target = node.target
+ elif keys:
+ key_target = node.target
+ else:
+ value_target = node.target
+
+ if isinstance(node.body, Nodes.StatListNode):
+ body = node.body
+ else:
+ body = Nodes.StatListNode(pos = node.body.pos,
+ stats = [node.body])
+
+ # keep original length to guard against dict modification
+ dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(dict_len_temp)
+ dict_len_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=dict_len_temp.ref(dict_obj.pos),
+ type=PyrexTypes.c_ptr_type(dict_len_temp.type))
+ temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ temps.append(temp)
+ is_dict_temp = temp.ref(node.pos)
+ is_dict_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=is_dict_temp,
+ type=PyrexTypes.c_ptr_type(temp.type))
+
+ iter_next_node = Nodes.DictIterationNextNode(
+ dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
+ key_target, value_target, tuple_target,
+ is_dict_temp)
+ iter_next_node = iter_next_node.analyse_expressions(self.current_env())
+ body.stats[0:0] = [iter_next_node]
+
+ if method:
+ method_node = ExprNodes.StringNode(
+ dict_obj.pos, is_identifier=True, value=method)
+ dict_obj = dict_obj.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(method) <= 30 else ''),
+ error = "PyExc_AttributeError",
+ format_args = [method])
+ else:
+ method_node = ExprNodes.NullNode(dict_obj.pos)
+ dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")
+
+ def flag_node(value):
+ value = value and 1 or 0
+ return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
+
+ result_code = [
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs = pos_temp,
+ rhs = ExprNodes.IntNode(node.pos, value='0',
+ constant_result=0)),
+ Nodes.SingleAssignmentNode(
+ dict_obj.pos,
+ lhs = dict_temp,
+ rhs = ExprNodes.PythonCapiCallNode(
+ dict_obj.pos,
+ "__Pyx_dict_iterator",
+ self.PyDict_Iterator_func_type,
+ utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
+ args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
+ method_node, dict_len_temp_addr, is_dict_temp_addr,
+ ],
+ is_temp=True,
+ )),
+ Nodes.WhileStatNode(
+ node.pos,
+ condition = None,
+ body = body,
+ else_clause = node.else_clause
+ )
+ ]
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=temps,
+ body=Nodes.StatListNode(
+ node.pos,
+ stats = result_code
+ ))
+
+ PyDict_Iterator_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
+ ])
+
+ PySet_Iterator_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
+ ])
+
+ def _transform_set_iteration(self, node, set_obj):
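+        # Same scheme as the dict iteration above, based on __Pyx_set_iterator()
+        # and a per-step SetIterationNextNode (sketch only).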
+ temps = []
+ temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ temps.append(temp)
+ set_temp = temp.ref(set_obj.pos)
+ temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(temp)
+ pos_temp = temp.ref(node.pos)
+
+ if isinstance(node.body, Nodes.StatListNode):
+ body = node.body
+ else:
+ body = Nodes.StatListNode(pos = node.body.pos,
+ stats = [node.body])
+
+ # keep original length to guard against set modification
+ set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(set_len_temp)
+ set_len_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=set_len_temp.ref(set_obj.pos),
+ type=PyrexTypes.c_ptr_type(set_len_temp.type))
+ temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ temps.append(temp)
+ is_set_temp = temp.ref(node.pos)
+ is_set_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=is_set_temp,
+ type=PyrexTypes.c_ptr_type(temp.type))
+
+ value_target = node.target
+ iter_next_node = Nodes.SetIterationNextNode(
+ set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
+ iter_next_node = iter_next_node.analyse_expressions(self.current_env())
+ body.stats[0:0] = [iter_next_node]
+
+ def flag_node(value):
+ value = value and 1 or 0
+ return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
+
+ result_code = [
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=pos_temp,
+ rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
+ Nodes.SingleAssignmentNode(
+ set_obj.pos,
+ lhs=set_temp,
+ rhs=ExprNodes.PythonCapiCallNode(
+ set_obj.pos,
+ "__Pyx_set_iterator",
+ self.PySet_Iterator_func_type,
+ utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
+ args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
+ set_len_temp_addr, is_set_temp_addr,
+ ],
+ is_temp=True,
+ )),
+ Nodes.WhileStatNode(
+ node.pos,
+ condition=None,
+ body=body,
+ else_clause=node.else_clause,
+ )
+ ]
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=temps,
+ body=Nodes.StatListNode(
+ node.pos,
+ stats = result_code
+ ))
+
+
+class SwitchTransform(Visitor.EnvTransform):
+ """
+    This transformation tries to turn long if statements into C switch statements.
+    The requirement is that every clause be a comparison (or an 'or' of comparisons)
+    of the form var == value, where var is common among all clauses and both var
+    and value are ints.
+ """
+ NO_MATCH = (None, None, None)
+
+ def extract_conditions(self, cond, allow_not_in):
+ while True:
+ if isinstance(cond, (ExprNodes.CoerceToTempNode,
+ ExprNodes.CoerceToBooleanNode)):
+ cond = cond.arg
+ elif isinstance(cond, ExprNodes.BoolBinopResultNode):
+ cond = cond.arg.arg
+ elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
+ # this is what we get from the FlattenInListTransform
+ cond = cond.subexpression
+ elif isinstance(cond, ExprNodes.TypecastNode):
+ cond = cond.operand
+ else:
+ break
+
+ if isinstance(cond, ExprNodes.PrimaryCmpNode):
+ if cond.cascade is not None:
+ return self.NO_MATCH
+ elif cond.is_c_string_contains() and \
+ isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
+ not_in = cond.operator == 'not_in'
+ if not_in and not allow_not_in:
+ return self.NO_MATCH
+ if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
+ cond.operand2.contains_surrogates():
+ # dealing with surrogates leads to different
+ # behaviour on wide and narrow Unicode
+ # platforms => refuse to optimise this case
+ return self.NO_MATCH
+ return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
+ elif not cond.is_python_comparison():
+ if cond.operator == '==':
+ not_in = False
+ elif allow_not_in and cond.operator == '!=':
+ not_in = True
+ else:
+ return self.NO_MATCH
+            # comparing an operand with itself looks somewhat silly, but it
+            # makes is_common_value() run the right checks for NameNode and
+            # AttributeNode operands
+ if is_common_value(cond.operand1, cond.operand1):
+ if cond.operand2.is_literal:
+ return not_in, cond.operand1, [cond.operand2]
+ elif getattr(cond.operand2, 'entry', None) \
+ and cond.operand2.entry.is_const:
+ return not_in, cond.operand1, [cond.operand2]
+ if is_common_value(cond.operand2, cond.operand2):
+ if cond.operand1.is_literal:
+ return not_in, cond.operand2, [cond.operand1]
+ elif getattr(cond.operand1, 'entry', None) \
+ and cond.operand1.entry.is_const:
+ return not_in, cond.operand2, [cond.operand1]
+ elif isinstance(cond, ExprNodes.BoolBinopNode):
+ if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
+ allow_not_in = (cond.operator == 'and')
+ not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
+ not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
+ if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
+ if (not not_in_1) or allow_not_in:
+ return not_in_1, t1, c1+c2
+ return self.NO_MATCH
+
+ def extract_in_string_conditions(self, string_literal):
+ if isinstance(string_literal, ExprNodes.UnicodeNode):
+ charvals = list(map(ord, set(string_literal.value)))
+ charvals.sort()
+ return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
+ constant_result=charval)
+ for charval in charvals ]
+ else:
+ # this is a bit tricky as Py3's bytes type returns
+ # integers on iteration, whereas Py2 returns 1-char byte
+ # strings
+ characters = string_literal.value
+ characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
+ characters.sort()
+ return [ ExprNodes.CharNode(string_literal.pos, value=charval,
+ constant_result=charval)
+ for charval in characters ]
+
+ def extract_common_conditions(self, common_var, condition, allow_not_in):
+ not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
+ if var is None:
+ return self.NO_MATCH
+ elif common_var is not None and not is_common_value(var, common_var):
+ return self.NO_MATCH
+        elif not (var.type.is_int or var.type.is_enum) or any(
+                not (cond.type.is_int or cond.type.is_enum) for cond in conditions):
+ return self.NO_MATCH
+ return not_in, var, conditions
+
+ def has_duplicate_values(self, condition_values):
+ # duplicated values don't work in a switch statement
+ seen = set()
+ for value in condition_values:
+ if value.has_constant_result():
+ if value.constant_result in seen:
+ return True
+ seen.add(value.constant_result)
+ else:
+ # this isn't completely safe as we don't know the
+ # final C value, but this is about the best we can do
+ try:
+ if value.entry.cname in seen:
+ return True
+ except AttributeError:
+ return True # play safe
+ seen.add(value.entry.cname)
+ return False
+
+ def visit_IfStatNode(self, node):
+ if not self.current_directives.get('optimize.use_switch'):
+ self.visitchildren(node)
+ return node
+
+ common_var = None
+ cases = []
+ for if_clause in node.if_clauses:
+ _, common_var, conditions = self.extract_common_conditions(
+ common_var, if_clause.condition, False)
+ if common_var is None:
+ self.visitchildren(node)
+ return node
+ cases.append(Nodes.SwitchCaseNode(pos=if_clause.pos,
+ conditions=conditions,
+ body=if_clause.body))
+
+ condition_values = [
+ cond for case in cases for cond in case.conditions]
+ if len(condition_values) < 2:
+ self.visitchildren(node)
+ return node
+ if self.has_duplicate_values(condition_values):
+ self.visitchildren(node)
+ return node
+
+ # Recurse into body subtrees that we left untouched so far.
+ self.visitchildren(node, 'else_clause')
+ for case in cases:
+ self.visitchildren(case, 'body')
+
+ common_var = unwrap_node(common_var)
+ switch_node = Nodes.SwitchStatNode(pos=node.pos,
+ test=common_var,
+ cases=cases,
+ else_clause=node.else_clause)
+ return switch_node
+
+ def visit_CondExprNode(self, node):
+ if not self.current_directives.get('optimize.use_switch'):
+ self.visitchildren(node)
+ return node
+
+ not_in, common_var, conditions = self.extract_common_conditions(
+ None, node.test, True)
+ if common_var is None \
+ or len(conditions) < 2 \
+ or self.has_duplicate_values(conditions):
+ self.visitchildren(node)
+ return node
+
+ return self.build_simple_switch_statement(
+ node, common_var, conditions, not_in,
+ node.true_val, node.false_val)
+
+ def visit_BoolBinopNode(self, node):
+ if not self.current_directives.get('optimize.use_switch'):
+ self.visitchildren(node)
+ return node
+
+ not_in, common_var, conditions = self.extract_common_conditions(
+ None, node, True)
+ if common_var is None \
+ or len(conditions) < 2 \
+ or self.has_duplicate_values(conditions):
+ self.visitchildren(node)
+ node.wrap_operands(self.current_env()) # in case we changed the operands
+ return node
+
+ return self.build_simple_switch_statement(
+ node, common_var, conditions, not_in,
+ ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
+ ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
+
+ def visit_PrimaryCmpNode(self, node):
+ if not self.current_directives.get('optimize.use_switch'):
+ self.visitchildren(node)
+ return node
+
+ not_in, common_var, conditions = self.extract_common_conditions(
+ None, node, True)
+ if common_var is None \
+ or len(conditions) < 2 \
+ or self.has_duplicate_values(conditions):
+ self.visitchildren(node)
+ return node
+
+ return self.build_simple_switch_statement(
+ node, common_var, conditions, not_in,
+ ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
+ ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
+
+ def build_simple_switch_statement(self, node, common_var, conditions,
+ not_in, true_val, false_val):
+ result_ref = UtilNodes.ResultRefNode(node)
+ true_body = Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=result_ref,
+ rhs=true_val.coerce_to(node.type, self.current_env()),
+ first=True)
+ false_body = Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=result_ref,
+ rhs=false_val.coerce_to(node.type, self.current_env()),
+ first=True)
+
+ if not_in:
+ true_body, false_body = false_body, true_body
+
+ cases = [Nodes.SwitchCaseNode(pos = node.pos,
+ conditions = conditions,
+ body = true_body)]
+
+ common_var = unwrap_node(common_var)
+ switch_node = Nodes.SwitchStatNode(pos = node.pos,
+ test = common_var,
+ cases = cases,
+ else_clause = false_body)
+ replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
+ return replacement
+
+ def visit_EvalWithTempExprNode(self, node):
+ if not self.current_directives.get('optimize.use_switch'):
+ self.visitchildren(node)
+ return node
+
+ # drop unused expression temp from FlattenInListTransform
+ orig_expr = node.subexpression
+ temp_ref = node.lazy_temp
+ self.visitchildren(node)
+ if node.subexpression is not orig_expr:
+ # node was restructured => check if temp is still used
+ if not Visitor.tree_contains(node.subexpression, temp_ref):
+ return node.subexpression
+ return node
+
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+
+class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
+ """
+ This transformation flattens "x in [val1, ..., valn]" into a sequential list
+ of comparisons.
+ """
+
+ def visit_PrimaryCmpNode(self, node):
+ self.visitchildren(node)
+ if node.cascade is not None:
+ return node
+ elif node.operator == 'in':
+ conjunction = 'or'
+ eq_or_neq = '=='
+ elif node.operator == 'not_in':
+ conjunction = 'and'
+ eq_or_neq = '!='
+ else:
+ return node
+
+ if not isinstance(node.operand2, (ExprNodes.TupleNode,
+ ExprNodes.ListNode,
+ ExprNodes.SetNode)):
+ return node
+
+ args = node.operand2.args
+ if len(args) == 0:
+ # note: lhs may have side effects
+ return node
+
+        if any(arg.is_starred for arg in args):
+ # Starred arguments do not directly translate to comparisons or "in" tests.
+ return node
+
+ lhs = UtilNodes.ResultRefNode(node.operand1)
+
+ conds = []
+ temps = []
+ for arg in args:
+ try:
+ # Trial optimisation to avoid redundant temp
+ # assignments. However, since is_simple() is meant to
+ # be called after type analysis, we ignore any errors
+ # and just play safe in that case.
+ is_simple_arg = arg.is_simple()
+ except Exception:
+ is_simple_arg = False
+ if not is_simple_arg:
+ # must evaluate all non-simple RHS before doing the comparisons
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ cond = ExprNodes.PrimaryCmpNode(
+ pos = node.pos,
+ operand1 = lhs,
+ operator = eq_or_neq,
+ operand2 = arg,
+ cascade = None)
+ conds.append(ExprNodes.TypecastNode(
+ pos = node.pos,
+ operand = cond,
+ type = PyrexTypes.c_bint_type))
+ def concat(left, right):
+ return ExprNodes.BoolBinopNode(
+ pos = node.pos,
+ operator = conjunction,
+ operand1 = left,
+ operand2 = right)
+
+ condition = reduce(concat, conds)
+ new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
+ for temp in temps[::-1]:
+ new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
+ return new_node
+
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+
+class DropRefcountingTransform(Visitor.VisitorTransform):
+ """Drop ref-counting in safe places.
+ """
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+ def visit_ParallelAssignmentNode(self, node):
+ """
+ Parallel swap assignments like 'a,b = b,a' are safe.
+ """
+ left_names, right_names = [], []
+ left_indices, right_indices = [], []
+ temps = []
+
+ for stat in node.stats:
+ if isinstance(stat, Nodes.SingleAssignmentNode):
+ if not self._extract_operand(stat.lhs, left_names,
+ left_indices, temps):
+ return node
+ if not self._extract_operand(stat.rhs, right_names,
+ right_indices, temps):
+ return node
+ elif isinstance(stat, Nodes.CascadedAssignmentNode):
+ # FIXME
+ return node
+ else:
+ return node
+
+ if left_names or right_names:
+ # lhs/rhs names must be a non-redundant permutation
+ lnames = [ path for path, n in left_names ]
+ rnames = [ path for path, n in right_names ]
+ if set(lnames) != set(rnames):
+ return node
+ if len(set(lnames)) != len(right_names):
+ return node
+
+ if left_indices or right_indices:
+ # base name and index of index nodes must be a
+ # non-redundant permutation
+ lindices = []
+ for lhs_node in left_indices:
+ index_id = self._extract_index_id(lhs_node)
+ if not index_id:
+ return node
+ lindices.append(index_id)
+ rindices = []
+ for rhs_node in right_indices:
+ index_id = self._extract_index_id(rhs_node)
+ if not index_id:
+ return node
+ rindices.append(index_id)
+
+ if set(lindices) != set(rindices):
+ return node
+ if len(set(lindices)) != len(right_indices):
+ return node
+
+ # really supporting IndexNode requires support in
+ # __Pyx_GetItemInt(), so let's stop short for now
+ return node
+
+ temp_args = [t.arg for t in temps]
+ for temp in temps:
+ temp.use_managed_ref = False
+
+ for _, name_node in left_names + right_names:
+ if name_node not in temp_args:
+ name_node.use_managed_ref = False
+
+ for index_node in left_indices + right_indices:
+ index_node.use_managed_ref = False
+
+ return node
+
+ def _extract_operand(self, node, names, indices, temps):
+ node = unwrap_node(node)
+ if not node.type.is_pyobject:
+ return False
+ if isinstance(node, ExprNodes.CoerceToTempNode):
+ temps.append(node)
+ node = node.arg
+ name_path = []
+ obj_node = node
+ while obj_node.is_attribute:
+ if obj_node.is_py_attr:
+ return False
+ name_path.append(obj_node.member)
+ obj_node = obj_node.obj
+ if obj_node.is_name:
+ name_path.append(obj_node.name)
+ names.append( ('.'.join(name_path[::-1]), node) )
+ elif node.is_subscript:
+ if node.base.type != Builtin.list_type:
+ return False
+ if not node.index.type.is_int:
+ return False
+ if not node.base.is_name:
+ return False
+ indices.append(node)
+ else:
+ return False
+ return True
+
+ def _extract_index_id(self, index_node):
+ base = index_node.base
+ index = index_node.index
+ if isinstance(index, ExprNodes.NameNode):
+ index_val = index.name
+ elif isinstance(index, ExprNodes.ConstNode):
+ # FIXME:
+ return None
+ else:
+ return None
+ return (base.name, index_val)
+
+
+class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
+ """Optimize some common calls to builtin types *before* the type
+ analysis phase and *after* the declarations analysis phase.
+
+ This transform cannot make use of any argument types, but it can
+ restructure the tree in a way that the type analysis phase can
+ respond to.
+
+ Introducing C function calls here may not be a good idea. Move
+ them to the OptimizeBuiltinCalls transform instead, which runs
+ after type analysis.
+ """
+ # only intercept on call nodes
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+ def visit_SimpleCallNode(self, node):
+ self.visitchildren(node)
+ function = node.function
+ if not self._function_is_builtin_name(function):
+ return node
+ return self._dispatch_to_handler(node, function, node.args)
+
+ def visit_GeneralCallNode(self, node):
+ self.visitchildren(node)
+ function = node.function
+ if not self._function_is_builtin_name(function):
+ return node
+ arg_tuple = node.positional_args
+ if not isinstance(arg_tuple, ExprNodes.TupleNode):
+ return node
+ args = arg_tuple.args
+ return self._dispatch_to_handler(
+ node, function, args, node.keyword_args)
+
+ def _function_is_builtin_name(self, function):
+ if not function.is_name:
+ return False
+ env = self.current_env()
+ entry = env.lookup(function.name)
+ if entry is not env.builtin_scope().lookup_here(function.name):
+ return False
+ # if entry is None, it's at least an undeclared name, so likely builtin
+ return True
+
+ def _dispatch_to_handler(self, node, function, args, kwargs=None):
+ if kwargs is None:
+ handler_name = '_handle_simple_function_%s' % function.name
+ else:
+ handler_name = '_handle_general_function_%s' % function.name
+ handle_call = getattr(self, handler_name, None)
+ if handle_call is not None:
+ if kwargs is None:
+ return handle_call(node, args)
+ else:
+ return handle_call(node, args, kwargs)
+ return node
+
+ def _inject_capi_function(self, node, cname, func_type, utility_code=None):
+ node.function = ExprNodes.PythonCapiFunctionNode(
+ node.function.pos, node.function.name, cname, func_type,
+ utility_code = utility_code)
+
+ def _error_wrong_arg_count(self, function_name, node, args, expected=None):
+ if not expected: # None or 0
+ arg_str = ''
+ elif isinstance(expected, basestring) or expected > 1:
+ arg_str = '...'
+ elif expected == 1:
+ arg_str = 'x'
+ else:
+ arg_str = ''
+ if expected is not None:
+ expected_str = 'expected %s, ' % expected
+ else:
+ expected_str = ''
+ error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
+ function_name, arg_str, expected_str, len(args)))
+
+ # specific handlers for simple call nodes
+
+ def _handle_simple_function_float(self, node, pos_args):
+ if not pos_args:
+ return ExprNodes.FloatNode(node.pos, value='0.0')
+ if len(pos_args) > 1:
+ self._error_wrong_arg_count('float', node, pos_args, 1)
+ arg_type = getattr(pos_args[0], 'type', None)
+ if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
+ return pos_args[0]
+ return node
+
+ def _handle_simple_function_slice(self, node, pos_args):
+ arg_count = len(pos_args)
+ start = step = None
+ if arg_count == 1:
+ stop, = pos_args
+ elif arg_count == 2:
+ start, stop = pos_args
+ elif arg_count == 3:
+ start, stop, step = pos_args
+ else:
+ self._error_wrong_arg_count('slice', node, pos_args)
+ return node
+ return ExprNodes.SliceNode(
+ node.pos,
+ start=start or ExprNodes.NoneNode(node.pos),
+ stop=stop,
+ step=step or ExprNodes.NoneNode(node.pos))
+
+ def _handle_simple_function_ord(self, node, pos_args):
+ """Unpack ord('X').
+ """
+ if len(pos_args) != 1:
+ return node
+ arg = pos_args[0]
+ if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
+ if len(arg.value) == 1:
+ return ExprNodes.IntNode(
+ arg.pos, type=PyrexTypes.c_long_type,
+ value=str(ord(arg.value)),
+ constant_result=ord(arg.value)
+ )
+ elif isinstance(arg, ExprNodes.StringNode):
+ if arg.unicode_value and len(arg.unicode_value) == 1 \
+ and ord(arg.unicode_value) <= 255: # Py2/3 portability
+ return ExprNodes.IntNode(
+ arg.pos, type=PyrexTypes.c_int_type,
+ value=str(ord(arg.unicode_value)),
+ constant_result=ord(arg.unicode_value)
+ )
+ return node
+
+ # sequence processing
+
+ def _handle_simple_function_all(self, node, pos_args):
+ """Transform
+
+ _result = all(p(x) for L in LL for x in L)
+
+ into
+
+ for L in LL:
+ for x in L:
+ if not p(x):
+ return False
+ else:
+ return True
+ """
+ return self._transform_any_all(node, pos_args, False)
+
+ def _handle_simple_function_any(self, node, pos_args):
+ """Transform
+
+ _result = any(p(x) for L in LL for x in L)
+
+ into
+
+ for L in LL:
+ for x in L:
+ if p(x):
+ return True
+ else:
+ return False
+ """
+ return self._transform_any_all(node, pos_args, True)
+
+ def _transform_any_all(self, node, pos_args, is_any):
+ if len(pos_args) != 1:
+ return node
+ if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
+ return node
+ gen_expr_node = pos_args[0]
+ generator_body = gen_expr_node.def_node.gbody
+ loop_node = generator_body.body
+ yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
+ if yield_expression is None:
+ return node
+
+ if is_any:
+ condition = yield_expression
+ else:
+ condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression)
+
+ test_node = Nodes.IfStatNode(
+ yield_expression.pos, else_clause=None, if_clauses=[
+ Nodes.IfClauseNode(
+ yield_expression.pos,
+ condition=condition,
+ body=Nodes.ReturnStatNode(
+ node.pos,
+ value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any))
+ )]
+ )
+ loop_node.else_clause = Nodes.ReturnStatNode(
+ node.pos,
+ value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any))
+
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node)
+
+ return ExprNodes.InlinedGeneratorExpressionNode(
+ gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all')
+
+ PySequence_List_func_type = PyrexTypes.CFuncType(
+ Builtin.list_type,
+ [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
+
+ def _handle_simple_function_sorted(self, node, pos_args):
+ """Transform sorted(genexpr) and sorted([listcomp]) into
+ [listcomp].sort(). CPython just reads the iterable into a
+ list and calls .sort() on it. Expanding the iterable in a
+ listcomp is still faster and the result can be sorted in
+ place.
+ """
+ if len(pos_args) != 1:
+ return node
+
+ arg = pos_args[0]
+ if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
+ list_node = pos_args[0]
+ loop_node = list_node.loop
+
+ elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
+ gen_expr_node = arg
+ loop_node = gen_expr_node.loop
+ yield_statements = _find_yield_statements(loop_node)
+ if not yield_statements:
+ return node
+
+ list_node = ExprNodes.InlinedGeneratorExpressionNode(
+ node.pos, gen_expr_node, orig_func='sorted',
+ comprehension_type=Builtin.list_type)
+
+ for yield_expression, yield_stat_node in yield_statements:
+ append_node = ExprNodes.ComprehensionAppendNode(
+ yield_expression.pos,
+ expr=yield_expression,
+ target=list_node.target)
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
+
+ elif arg.is_sequence_constructor:
+ # sorted([a, b, c]) or sorted((a, b, c)). The result is always a list,
+ # so starting off with a fresh one is more efficient.
+ list_node = loop_node = arg.as_list()
+
+ else:
+ # Interestingly, PySequence_List works on a lot of non-sequence
+ # things as well.
+ list_node = loop_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "PySequence_List", self.PySequence_List_func_type,
+ args=pos_args, is_temp=True)
+
+ result_node = UtilNodes.ResultRefNode(
+ pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False)
+ list_assign_node = Nodes.SingleAssignmentNode(
+ node.pos, lhs=result_node, rhs=list_node, first=True)
+
+ sort_method = ExprNodes.AttributeNode(
+ node.pos, obj=result_node, attribute=EncodedString('sort'),
+ # entry ? type ?
+ needs_none_check=False)
+ sort_node = Nodes.ExprStatNode(
+ node.pos, expr=ExprNodes.SimpleCallNode(
+ node.pos, function=sort_method, args=[]))
+
+ sort_node.analyse_declarations(self.current_env())
+
+ return UtilNodes.TempResultFromStatNode(
+ result_node,
+ Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node]))
+
+ def __handle_simple_function_sum(self, node, pos_args):
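+        # Note: the name-mangled '__' prefix keeps this handler out of the
+        # getattr() dispatch above, i.e. it is currently disabled.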
+ """Transform sum(genexpr) into an equivalent inlined aggregation loop.
+ """
+ if len(pos_args) not in (1,2):
+ return node
+ if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
+ ExprNodes.ComprehensionNode)):
+ return node
+ gen_expr_node = pos_args[0]
+ loop_node = gen_expr_node.loop
+
+ if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
+ yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
+ # FIXME: currently nonfunctional
+ yield_expression = None
+ if yield_expression is None:
+ return node
+ else: # ComprehensionNode
+ yield_stat_node = gen_expr_node.append
+ yield_expression = yield_stat_node.expr
+ try:
+ if not yield_expression.is_literal or not yield_expression.type.is_int:
+ return node
+ except AttributeError:
+ return node # in case we don't have a type yet
+ # special case: old Py2 backwards compatible "sum([int_const for ...])"
+ # can safely be unpacked into a genexpr
+
+ if len(pos_args) == 1:
+ start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
+ else:
+ start = pos_args[1]
+
+ result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
+ add_node = Nodes.SingleAssignmentNode(
+ yield_expression.pos,
+ lhs = result_ref,
+ rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
+ )
+
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node)
+
+ exec_code = Nodes.StatListNode(
+ node.pos,
+ stats = [
+ Nodes.SingleAssignmentNode(
+ start.pos,
+ lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
+ rhs = start,
+ first = True),
+ loop_node
+ ])
+
+ return ExprNodes.InlinedGeneratorExpressionNode(
+ gen_expr_node.pos, loop = exec_code, result_node = result_ref,
+ expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
+ has_local_scope = gen_expr_node.has_local_scope)
+
+ def _handle_simple_function_min(self, node, pos_args):
+ return self._optimise_min_max(node, pos_args, '<')
+
+ def _handle_simple_function_max(self, node, pos_args):
+ return self._optimise_min_max(node, pos_args, '>')
+
+ def _optimise_min_max(self, node, args, operator):
+ """Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
+ """
+ if len(args) <= 1:
+ if len(args) == 1 and args[0].is_sequence_constructor:
+ args = args[0].args
+ if len(args) <= 1:
+ # leave this to Python
+ return node
+
+ cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
+
+ last_result = args[0]
+ for arg_node in cascaded_nodes:
+ result_ref = UtilNodes.ResultRefNode(last_result)
+ last_result = ExprNodes.CondExprNode(
+ arg_node.pos,
+ true_val = arg_node,
+ false_val = result_ref,
+ test = ExprNodes.PrimaryCmpNode(
+ arg_node.pos,
+ operand1 = arg_node,
+ operator = operator,
+ operand2 = result_ref,
+ )
+ )
+ last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
+
+ for ref_node in cascaded_nodes[::-1]:
+ last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
+
+ return last_result
+
+ # builtin type creation
+
+ def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
+ if not pos_args:
+ return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
+ # This is a bit special - for iterables (including genexps),
+ # Python actually overallocates and resizes a newly created
+ # tuple incrementally while reading items, which we can't
+ # easily do without explicit node support. Instead, we read
+ # the items into a list and then copy them into a tuple of the
+ # final size. This takes up to twice as much memory, but will
+ # have to do until we have real support for genexps.
+ result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
+ if result is not node:
+ return ExprNodes.AsTupleNode(node.pos, arg=result)
+ return node
+
+ def _handle_simple_function_frozenset(self, node, pos_args):
+ """Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
+ """
+ if len(pos_args) != 1:
+ return node
+ if pos_args[0].is_sequence_constructor and not pos_args[0].args:
+ del pos_args[0]
+ elif isinstance(pos_args[0], ExprNodes.ListNode):
+ pos_args[0] = pos_args[0].as_tuple()
+ return node
+
+ def _handle_simple_function_list(self, node, pos_args):
+ if not pos_args:
+ return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
+ return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
+
+ def _handle_simple_function_set(self, node, pos_args):
+ if not pos_args:
+ return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
+ return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
+
+ def _transform_list_set_genexpr(self, node, pos_args, target_type):
+ """Replace set(genexpr) and list(genexpr) by an inlined comprehension.
+ """
+ if len(pos_args) > 1:
+ return node
+ if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
+ return node
+ gen_expr_node = pos_args[0]
+ loop_node = gen_expr_node.loop
+
+ yield_statements = _find_yield_statements(loop_node)
+ if not yield_statements:
+ return node
+
+ result_node = ExprNodes.InlinedGeneratorExpressionNode(
+ node.pos, gen_expr_node,
+ orig_func='set' if target_type is Builtin.set_type else 'list',
+ comprehension_type=target_type)
+
+ for yield_expression, yield_stat_node in yield_statements:
+ append_node = ExprNodes.ComprehensionAppendNode(
+ yield_expression.pos,
+ expr=yield_expression,
+ target=result_node.target)
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
+
+ return result_node
+
+ def _handle_simple_function_dict(self, node, pos_args):
+ """Replace dict( (a,b) for ... ) by an inlined { a:b for ... }
+ """
+ if len(pos_args) == 0:
+ return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
+ if len(pos_args) > 1:
+ return node
+ if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
+ return node
+ gen_expr_node = pos_args[0]
+ loop_node = gen_expr_node.loop
+
+ yield_statements = _find_yield_statements(loop_node)
+ if not yield_statements:
+ return node
+
+ for yield_expression, _ in yield_statements:
+ if not isinstance(yield_expression, ExprNodes.TupleNode):
+ return node
+ if len(yield_expression.args) != 2:
+ return node
+
+ result_node = ExprNodes.InlinedGeneratorExpressionNode(
+ node.pos, gen_expr_node, orig_func='dict',
+ comprehension_type=Builtin.dict_type)
+
+ for yield_expression, yield_stat_node in yield_statements:
+ append_node = ExprNodes.DictComprehensionAppendNode(
+ yield_expression.pos,
+ key_expr=yield_expression.args[0],
+ value_expr=yield_expression.args[1],
+ target=result_node.target)
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
+
+ return result_node
+
+ # specific handlers for general call nodes
+
+ def _handle_general_function_dict(self, node, pos_args, kwargs):
+ """Replace dict(a=b,c=d,...) by the underlying keyword dict
+ construction which is done anyway.
+ """
+ if len(pos_args) > 0:
+ return node
+ if not isinstance(kwargs, ExprNodes.DictNode):
+ return node
+ return kwargs
+
+
+class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+ def get_constant_value_node(self, name_node):
+ if name_node.cf_state is None:
+ return None
+ if name_node.cf_state.cf_is_null:
+ return None
+ entry = self.current_env().lookup(name_node.name)
+ if not entry or (not entry.cf_assignments
+ or len(entry.cf_assignments) != 1):
+ # not just a single assignment in all closures
+ return None
+ return entry.cf_assignments[0].rhs
+
+ def visit_SimpleCallNode(self, node):
+ self.visitchildren(node)
+ if not self.current_directives.get('optimize.inline_defnode_calls'):
+ return node
+ function_name = node.function
+ if not function_name.is_name:
+ return node
+ function = self.get_constant_value_node(function_name)
+ if not isinstance(function, ExprNodes.PyCFunctionNode):
+ return node
+ inlined = ExprNodes.InlinedDefNodeCallNode(
+ node.pos, function_name=function_name,
+ function=function, args=node.args)
+ if inlined.can_be_inlined():
+ return self.replace(node, inlined)
+ return node
+
+
+class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
+ Visitor.MethodDispatcherTransform):
+ """Optimize some common methods calls and instantiation patterns
+ for builtin types *after* the type analysis phase.
+
+ Running after type analysis, this transform can only perform
+ function replacements that do not alter the function return type
+ in a way that was not anticipated by the type analysis.
+ """
+ ### cleanup to avoid redundant coercions to/from Python types
+
+ def visit_PyTypeTestNode(self, node):
+ """Flatten redundant type checks after tree changes.
+ """
+ self.visitchildren(node)
+ return node.reanalyse()
+
+ def _visit_TypecastNode(self, node):
+ # disabled - the user may have had a reason to put a type
+ # cast, even if it looks redundant to Cython
+ """
+ Drop redundant type casts.
+ """
+ self.visitchildren(node)
+ if node.type == node.operand.type:
+ return node.operand
+ return node
+
+ def visit_ExprStatNode(self, node):
+ """
+ Drop dead code and useless coercions.
+ """
+ self.visitchildren(node)
+ if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
+ node.expr = node.expr.arg
+ expr = node.expr
+ if expr is None or expr.is_none or expr.is_literal:
+ # Expression was removed or is dead code => remove ExprStatNode as well.
+ return None
+ if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
+ # Ignore dead references to local variables etc.
+ return None
+ return node
+
+ def visit_CoerceToBooleanNode(self, node):
+ """Drop redundant conversion nodes after tree changes.
+ """
+ self.visitchildren(node)
+ arg = node.arg
+ if isinstance(arg, ExprNodes.PyTypeTestNode):
+ arg = arg.arg
+ if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
+ if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
+ return arg.arg.coerce_to_boolean(self.current_env())
+ return node
+
+ PyNumber_Float_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
+ ])
+
+ def visit_CoerceToPyTypeNode(self, node):
+ """Drop redundant conversion nodes after tree changes."""
+ self.visitchildren(node)
+ arg = node.arg
+ if isinstance(arg, ExprNodes.CoerceFromPyTypeNode):
+ arg = arg.arg
+ if isinstance(arg, ExprNodes.PythonCapiCallNode):
+ if arg.function.name == 'float' and len(arg.args) == 1:
+ # undo redundant Py->C->Py coercion
+ func_arg = arg.args[0]
+ if func_arg.type is Builtin.float_type:
+ return func_arg.as_none_safe_node("float() argument must be a string or a number, not 'NoneType'")
+ elif func_arg.type.is_pyobject:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, '__Pyx_PyNumber_Float', self.PyNumber_Float_func_type,
+ args=[func_arg],
+ py_name='float',
+ is_temp=node.is_temp,
+ result_is_used=node.result_is_used,
+ ).coerce_to(node.type, self.current_env())
+ return node
+
+ def visit_CoerceFromPyTypeNode(self, node):
+ """Drop redundant conversion nodes after tree changes.
+
+ Also, optimise away calls to Python's builtin int() and
+ float() if the result is going to be coerced back into a C
+ type anyway.
+ """
+ self.visitchildren(node)
+ arg = node.arg
+ if not arg.type.is_pyobject:
+ # no Python conversion left at all, just do a C coercion instead
+ if node.type != arg.type:
+ arg = arg.coerce_to(node.type, self.current_env())
+ return arg
+ if isinstance(arg, ExprNodes.PyTypeTestNode):
+ arg = arg.arg
+ if arg.is_literal:
+ if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
+ node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
+ node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
+ return arg.coerce_to(node.type, self.current_env())
+ elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
+ if arg.type is PyrexTypes.py_object_type:
+ if node.type.assignable_from(arg.arg.type):
+ # completely redundant C->Py->C coercion
+ return arg.arg.coerce_to(node.type, self.current_env())
+ elif arg.type is Builtin.unicode_type:
+ if arg.arg.type.is_unicode_char and node.type.is_unicode_char:
+ return arg.arg.coerce_to(node.type, self.current_env())
+ elif isinstance(arg, ExprNodes.SimpleCallNode):
+ if node.type.is_int or node.type.is_float:
+ return self._optimise_numeric_cast_call(node, arg)
+ elif arg.is_subscript:
+ index_node = arg.index
+ if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
+ index_node = index_node.arg
+ if index_node.type.is_int:
+ return self._optimise_int_indexing(node, arg, index_node)
+ return node
+
+ PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_char_type, [
+ PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
+ PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
+ ],
+ exception_value = "((char)-1)",
+ exception_check = True)
+
+ def _optimise_int_indexing(self, coerce_node, arg, index_node):
+ env = self.current_env()
+ bound_check_bool = 1 if env.directives['boundscheck'] else 0
+ if arg.base.type is Builtin.bytes_type:
+ if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
+ # bytes[index] -> char
+ bound_check_node = ExprNodes.IntNode(
+ coerce_node.pos, value=str(bound_check_bool),
+ constant_result=bound_check_bool)
+ node = ExprNodes.PythonCapiCallNode(
+ coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
+ self.PyBytes_GetItemInt_func_type,
+ args=[
+ arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
+ index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
+ bound_check_node,
+ ],
+ is_temp=True,
+ utility_code=UtilityCode.load_cached(
+ 'bytes_index', 'StringTools.c'))
+ if coerce_node.type is not PyrexTypes.c_char_type:
+ node = node.coerce_to(coerce_node.type, env)
+ return node
+ return coerce_node
+
+ float_float_func_types = dict(
+ (float_type, PyrexTypes.CFuncType(
+ float_type, [
+ PyrexTypes.CFuncTypeArg("arg", float_type, None)
+ ]))
+ for float_type in (PyrexTypes.c_float_type, PyrexTypes.c_double_type, PyrexTypes.c_longdouble_type))
+
+ def _optimise_numeric_cast_call(self, node, arg):
+ function = arg.function
+ args = None
+ if isinstance(arg, ExprNodes.PythonCapiCallNode):
+ args = arg.args
+ elif isinstance(function, ExprNodes.NameNode):
+ if function.type.is_builtin_type and isinstance(arg.arg_tuple, ExprNodes.TupleNode):
+ args = arg.arg_tuple.args
+
+ if args is None or len(args) != 1:
+ return node
+ func_arg = args[0]
+ if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
+ func_arg = func_arg.arg
+ elif func_arg.type.is_pyobject:
+ # play it safe: Python conversion might work on all sorts of things
+ return node
+
+ if function.name == 'int':
+ if func_arg.type.is_int or node.type.is_int:
+ if func_arg.type == node.type:
+ return func_arg
+ elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
+ return ExprNodes.TypecastNode(node.pos, operand=func_arg, type=node.type)
+ elif func_arg.type.is_float and node.type.is_numeric:
+ if func_arg.type.math_h_modifier == 'l':
+ # Work around missing Cygwin definition.
+ truncl = '__Pyx_truncl'
+ else:
+ truncl = 'trunc' + func_arg.type.math_h_modifier
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, truncl,
+ func_type=self.float_float_func_types[func_arg.type],
+ args=[func_arg],
+ py_name='int',
+ is_temp=node.is_temp,
+ result_is_used=node.result_is_used,
+ ).coerce_to(node.type, self.current_env())
+ elif function.name == 'float':
+ if func_arg.type.is_float or node.type.is_float:
+ if func_arg.type == node.type:
+ return func_arg
+ elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
+ return ExprNodes.TypecastNode(
+ node.pos, operand=func_arg, type=node.type)
+ return node
+
+ def _error_wrong_arg_count(self, function_name, node, args, expected=None):
+ if not expected: # None or 0
+ arg_str = ''
+ elif isinstance(expected, basestring) or expected > 1:
+ arg_str = '...'
+ elif expected == 1:
+ arg_str = 'x'
+ else:
+ arg_str = ''
+ if expected is not None:
+ expected_str = 'expected %s, ' % expected
+ else:
+ expected_str = ''
+ error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
+ function_name, arg_str, expected_str, len(args)))
+
+ ### generic fallbacks
+
+ def _handle_function(self, node, function_name, function, arg_list, kwargs):
+ return node
+
+ def _handle_method(self, node, type_name, attr_name, function,
+ arg_list, is_unbound_method, kwargs):
+ """
+ Try to inject C-API calls for unbound method calls to builtin types.
+ While the method declarations in Builtin.py already handle this, we
+ can additionally resolve bound and unbound methods here that were
+ assigned to variables ahead of time.
+ """
+ if kwargs:
+ return node
+ if not function or not function.is_attribute or not function.obj.is_name:
+ # cannot track unbound method calls over more than one indirection as
+ # the names might have been reassigned in the meantime
+ return node
+ type_entry = self.current_env().lookup(type_name)
+ if not type_entry:
+ return node
+ method = ExprNodes.AttributeNode(
+ node.function.pos,
+ obj=ExprNodes.NameNode(
+ function.pos,
+ name=type_name,
+ entry=type_entry,
+ type=type_entry.type),
+ attribute=attr_name,
+ is_called=True).analyse_as_type_attribute(self.current_env())
+ if method is None:
+ return self._optimise_generic_builtin_method_call(
+ node, attr_name, function, arg_list, is_unbound_method)
+ args = node.args
+ if args is None and node.arg_tuple:
+ args = node.arg_tuple.args
+ call_node = ExprNodes.SimpleCallNode(
+ node.pos,
+ function=method,
+ args=args)
+ if not is_unbound_method:
+ call_node.self = function.obj
+ call_node.analyse_c_function_call(self.current_env())
+ call_node.analysed = True
+ return call_node.coerce_to(node.type, self.current_env())
+
+ ### builtin types
+
+ def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
+ """
+ Try to inject an unbound method call for a call to a method of a known builtin type.
+ This enables caching the underlying C function of the method at runtime.
+ """
+ arg_count = len(arg_list)
+ if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
+ return node
+ if not function.obj.type.is_builtin_type:
+ return node
+ if function.obj.type.name in ('basestring', 'type'):
+ # these allow different actual types => unsafe
+ return node
+ return ExprNodes.CachedBuiltinMethodCallNode(
+ node, function.obj, attr_name, arg_list)
+
+ PyObject_Unicode_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_unicode(self, node, function, pos_args):
+ """Optimise single argument calls to unicode().
+ """
+ if len(pos_args) != 1:
+ if len(pos_args) == 0:
+ return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.unicode_type:
+ if not arg.may_be_none():
+ return arg
+ cname = "__Pyx_PyUnicode_Unicode"
+ utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
+ else:
+ cname = "__Pyx_PyObject_Unicode"
+ utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, cname, self.PyObject_Unicode_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ utility_code=utility_code,
+ py_name="unicode")
+
+ def visit_FormattedValueNode(self, node):
+ """Simplify or avoid plain string formatting of a unicode value.
+ This seems misplaced here, but plain unicode formatting is essentially
+ a call to the unicode() builtin, which is optimised right above.
+ """
+ self.visitchildren(node)
+ if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
+ if not node.conversion_char or node.conversion_char == 's':
+ # value is definitely a unicode string and we don't format it any special
+ return self._handle_simple_function_unicode(node, None, [node.value])
+ return node
+
+ PyDict_Copy_func_type = PyrexTypes.CFuncType(
+ Builtin.dict_type, [
+ PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
+ ])
+
+ def _handle_simple_function_dict(self, node, function, pos_args):
+ """Replace dict(some_dict) by PyDict_Copy(some_dict).
+ """
+ if len(pos_args) != 1:
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.dict_type:
+ arg = arg.as_none_safe_node("'NoneType' is not iterable")
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
+ args = [arg],
+ is_temp = node.is_temp
+ )
+ return node
+
+ PySequence_List_func_type = PyrexTypes.CFuncType(
+ Builtin.list_type,
+ [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
+
+ def _handle_simple_function_list(self, node, function, pos_args):
+ """Turn list(ob) into PySequence_List(ob).
+ """
+ if len(pos_args) != 1:
+ return node
+ arg = pos_args[0]
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "PySequence_List", self.PySequence_List_func_type,
+ args=pos_args, is_temp=node.is_temp)
+
+ PyList_AsTuple_func_type = PyrexTypes.CFuncType(
+ Builtin.tuple_type, [
+ PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
+ ])
+
+ def _handle_simple_function_tuple(self, node, function, pos_args):
+ """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
+ """
+ if len(pos_args) != 1 or not node.is_temp:
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.tuple_type and not arg.may_be_none():
+ return arg
+ if arg.type is Builtin.list_type:
+ pos_args[0] = arg.as_none_safe_node(
+ "'NoneType' object is not iterable")
+
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
+ args=pos_args, is_temp=node.is_temp)
+ else:
+ return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type)
+
+ PySet_New_func_type = PyrexTypes.CFuncType(
+ Builtin.set_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_set(self, node, function, pos_args):
+ if len(pos_args) != 1:
+ return node
+ if pos_args[0].is_sequence_constructor:
+ # We can optimise set([x,y,z]) safely into a set literal,
+ # but only if we create all items before adding them -
+ # adding an item may raise an exception if it is not
+ # hashable, but creating the later items may have
+ # side-effects.
+ args = []
+ temps = []
+ for arg in pos_args[0].args:
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ args.append(arg)
+ result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
+ self.replace(node, result)
+ for temp in temps[::-1]:
+ result = UtilNodes.EvalWithTempExprNode(temp, result)
+ return result
+ else:
+ # PySet_New(it) is better than a generic Python call to set(it)
+ return self.replace(node, ExprNodes.PythonCapiCallNode(
+ node.pos, "PySet_New",
+ self.PySet_New_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ py_name="set"))
+
+ PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
+ Builtin.frozenset_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_frozenset(self, node, function, pos_args):
+ if not pos_args:
+ pos_args = [ExprNodes.NullNode(node.pos)]
+ elif len(pos_args) > 1:
+ return node
+ elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
+ return pos_args[0]
+ # PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyFrozenSet_New",
+ self.PyFrozenSet_New_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
+ py_name="frozenset")
+
+ PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_double_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
+ ],
+ exception_value = "((double)-1)",
+ exception_check = True)
+
+ def _handle_simple_function_float(self, node, function, pos_args):
+ """Transform float() into either a C type cast or a faster C
+ function call.
+ """
+ # Note: this requires the float() function to be typed as
+ # returning a C 'double'
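+ # Illustrative sketch (simplified; names are placeholders):
+ #     float(some_c_double)  ->  the value itself (no call at all)
+ #     float(some_c_int)     ->  a plain C cast to double
+ #     float(obj)            ->  __Pyx_PyObject_AsDouble(obj)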
+ if len(pos_args) == 0:
+ return ExprNodes.FloatNode(
+ node, value="0.0", constant_result=0.0
+ ).coerce_to(Builtin.float_type, self.current_env())
+ elif len(pos_args) != 1:
+ self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
+ return node
+ func_arg = pos_args[0]
+ if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
+ func_arg = func_arg.arg
+ if func_arg.type is PyrexTypes.c_double_type:
+ return func_arg
+ elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
+ return ExprNodes.TypecastNode(
+ node.pos, operand=func_arg, type=node.type)
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyObject_AsDouble",
+ self.PyObject_AsDouble_func_type,
+ args = pos_args,
+ is_temp = node.is_temp,
+ utility_code = load_c_utility('pyobject_as_double'),
+ py_name = "float")
+
+ PyNumber_Int_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
+ ])
+
+ PyInt_FromDouble_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_double_type, None)
+ ])
+
+ def _handle_simple_function_int(self, node, function, pos_args):
+ """Transform int() into a faster C function call.
+ """
+ if len(pos_args) == 0:
+ return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
+ type=PyrexTypes.py_object_type)
+ elif len(pos_args) != 1:
+ return node # int(x, base)
+ func_arg = pos_args[0]
+ if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
+ if func_arg.arg.type.is_float:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyInt_FromDouble", self.PyInt_FromDouble_func_type,
+ args=[func_arg.arg], is_temp=True, py_name='int',
+ utility_code=UtilityCode.load_cached("PyIntFromDouble", "TypeConversion.c"))
+ else:
+ return node # handled in visit_CoerceFromPyTypeNode()
+ if func_arg.type.is_pyobject and node.type.is_pyobject:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyNumber_Int", self.PyNumber_Int_func_type,
+ args=pos_args, is_temp=True, py_name='int')
+ return node
+
+ def _handle_simple_function_bool(self, node, function, pos_args):
+ """Transform bool(x) into a type coercion to a boolean.
+ """
+ if len(pos_args) == 0:
+ return ExprNodes.BoolNode(
+ node.pos, value=False, constant_result=False
+ ).coerce_to(Builtin.bool_type, self.current_env())
+ elif len(pos_args) != 1:
+ self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
+ return node
+ else:
+ # => !!<bint>(x) to make sure it's exactly 0 or 1
+ operand = pos_args[0].coerce_to_boolean(self.current_env())
+ operand = ExprNodes.NotNode(node.pos, operand = operand)
+ operand = ExprNodes.NotNode(node.pos, operand = operand)
+ # coerce back to Python object as that's the result we are expecting
+ return operand.coerce_to_pyobject(self.current_env())
+
+ ### builtin functions
+
+ Pyx_strlen_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_size_t_type, [
+ PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None)
+ ])
+
+ Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_size_t_type, [
+ PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None)
+ ])
+
+ PyObject_Size_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ssize_t_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
+ ],
+ exception_value="-1")
+
+ _map_to_capi_len_function = {
+ Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
+ Builtin.bytes_type: "PyBytes_GET_SIZE",
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.list_type: "PyList_GET_SIZE",
+ Builtin.tuple_type: "PyTuple_GET_SIZE",
+ Builtin.set_type: "PySet_GET_SIZE",
+ Builtin.frozenset_type: "PySet_GET_SIZE",
+ Builtin.dict_type: "PyDict_Size",
+ }.get
+
+ _ext_types_with_pysize = set(["cpython.array.array"])
+
+ def _handle_simple_function_len(self, node, function, pos_args):
+ """Replace len(char*) by the equivalent call to strlen(),
+ len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
+ len(known_builtin_type) by an equivalent C-API call.
+ """
+ if len(pos_args) != 1:
+ self._error_wrong_arg_count('len', node, pos_args, 1)
+ return node
+ arg = pos_args[0]
+ if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
+ arg = arg.arg
+ if arg.type.is_string:
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "strlen", self.Pyx_strlen_func_type,
+ args = [arg],
+ is_temp = node.is_temp,
+ utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ elif arg.type.is_pyunicode_ptr:
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
+ args = [arg],
+ is_temp = node.is_temp)
+ elif arg.type.is_memoryviewslice:
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_size_t_type, [
+ PyrexTypes.CFuncTypeArg("memoryviewslice", arg.type, None)
+ ], nogil=True)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_MemoryView_Len", func_type,
+ args=[arg], is_temp=node.is_temp)
+ elif arg.type.is_pyobject:
+ cfunc_name = self._map_to_capi_len_function(arg.type)
+ if cfunc_name is None:
+ arg_type = arg.type
+ if ((arg_type.is_extension_type or arg_type.is_builtin_type)
+ and arg_type.entry.qualified_name in self._ext_types_with_pysize):
+ cfunc_name = 'Py_SIZE'
+ else:
+ return node
+ arg = arg.as_none_safe_node(
+ "object of type 'NoneType' has no len()")
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, cfunc_name, self.PyObject_Size_func_type,
+ args=[arg], is_temp=node.is_temp)
+ elif arg.type.is_unicode_char:
+ return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
+ type=node.type)
+ else:
+ return node
+ if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
+ new_node = new_node.coerce_to(node.type, self.current_env())
+ return new_node
+
+ Pyx_Type_func_type = PyrexTypes.CFuncType(
+ Builtin.type_type, [
+ PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_type(self, node, function, pos_args):
+ """Replace type(o) by a macro call to Py_TYPE(o).
+ """
+ if len(pos_args) != 1:
+ return node
+ node = ExprNodes.PythonCapiCallNode(
+ node.pos, "Py_TYPE", self.Pyx_Type_func_type,
+ args = pos_args,
+ is_temp = False)
+ return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
+
+ Py_type_check_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_bint_type, [
+ PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_isinstance(self, node, function, pos_args):
+ """Replace isinstance() checks against builtin types by the
+ corresponding C-API call.
+ """
+ if len(pos_args) != 2:
+ return node
+ arg, types = pos_args
+ temps = []
+ if isinstance(types, ExprNodes.TupleNode):
+ types = types.args
+ if len(types) == 1 and types[0].type is not Builtin.type_type:
+ return node # nothing to improve here
+ if arg.is_attribute or not arg.is_simple():
+ arg = UtilNodes.ResultRefNode(arg)
+ temps.append(arg)
+ elif types.type is Builtin.type_type:
+ types = [types]
+ else:
+ return node
+
+ tests = []
+ test_nodes = []
+ env = self.current_env()
+ for test_type_node in types:
+ builtin_type = None
+ if test_type_node.is_name:
+ if test_type_node.entry:
+ entry = env.lookup(test_type_node.entry.name)
+ if entry and entry.type and entry.type.is_builtin_type:
+ builtin_type = entry.type
+ if builtin_type is Builtin.type_type:
+ # all types have type "type", but there's only one 'type'
+ if entry.name != 'type' or not (
+ entry.scope and entry.scope.is_builtin_scope):
+ builtin_type = None
+ if builtin_type is not None:
+ type_check_function = entry.type.type_check_function(exact=False)
+ if type_check_function in tests:
+ continue
+ tests.append(type_check_function)
+ type_check_args = [arg]
+ elif test_type_node.type is Builtin.type_type:
+ type_check_function = '__Pyx_TypeCheck'
+ type_check_args = [arg, test_type_node]
+ else:
+ if not test_type_node.is_literal:
+ test_type_node = UtilNodes.ResultRefNode(test_type_node)
+ temps.append(test_type_node)
+ type_check_function = 'PyObject_IsInstance'
+ type_check_args = [arg, test_type_node]
+ test_nodes.append(
+ ExprNodes.PythonCapiCallNode(
+ test_type_node.pos, type_check_function, self.Py_type_check_func_type,
+ args=type_check_args,
+ is_temp=True,
+ ))
+
+ def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
+ or_node = make_binop_node(node.pos, 'or', a, b)
+ or_node.type = PyrexTypes.c_bint_type
+ or_node.wrap_operands(env)
+ return or_node
+
+ test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
+ for temp in temps[::-1]:
+ test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
+ return test_node
+
+ def _handle_simple_function_ord(self, node, function, pos_args):
+ """Unpack ord(Py_UNICODE) and ord('X').
+ """
+ if len(pos_args) != 1:
+ return node
+ arg = pos_args[0]
+ if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
+ if arg.arg.type.is_unicode_char:
+ return ExprNodes.TypecastNode(
+ arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type
+ ).coerce_to(node.type, self.current_env())
+ elif isinstance(arg, ExprNodes.UnicodeNode):
+ if len(arg.value) == 1:
+ return ExprNodes.IntNode(
+ arg.pos, type=PyrexTypes.c_int_type,
+ value=str(ord(arg.value)),
+ constant_result=ord(arg.value)
+ ).coerce_to(node.type, self.current_env())
+ elif isinstance(arg, ExprNodes.StringNode):
+ if arg.unicode_value and len(arg.unicode_value) == 1 \
+ and ord(arg.unicode_value) <= 255: # Py2/3 portability
+ return ExprNodes.IntNode(
+ arg.pos, type=PyrexTypes.c_int_type,
+ value=str(ord(arg.unicode_value)),
+ constant_result=ord(arg.unicode_value)
+ ).coerce_to(node.type, self.current_env())
+ return node
+
+ ### special methods
+
+ Pyx_tp_new_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
+ ])
+
+ Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
+ PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
+ ])
+
+ def _handle_any_slot__new__(self, node, function, args,
+ is_unbound_method, kwargs=None):
+ """Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
+ """
+ obj = function.obj
+ if not is_unbound_method or len(args) < 1:
+ return node
+ type_arg = args[0]
+ if not obj.is_name or not type_arg.is_name:
+ # play safe
+ return node
+ if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
+ # not a known type, play safe
+ return node
+ if not type_arg.type_entry or not obj.type_entry:
+ if obj.name != type_arg.name:
+ return node
+ # otherwise, we know it's a type and we know it's the same
+ # type for both - that should do
+ elif type_arg.type_entry != obj.type_entry:
+ # different types - may or may not lead to an error at runtime
+ return node
+
+ args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
+ args_tuple = args_tuple.analyse_types(
+ self.current_env(), skip_children=True)
+
+ if type_arg.type_entry:
+ ext_type = type_arg.type_entry.type
+ if (ext_type.is_extension_type and ext_type.typeobj_cname and
+ ext_type.scope.global_scope() == self.current_env().global_scope()):
+ # known type in current module
+ tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
+ slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
+ if slot_func_cname:
+ cython_scope = self.context.cython_scope
+ PyTypeObjectPtr = PyrexTypes.CPtrType(
+ cython_scope.lookup('PyTypeObject').type)
+ pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
+ ext_type, [
+ PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
+ PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
+ ])
+
+ type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
+ if not kwargs:
+ kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, slot_func_cname,
+ pyx_tp_new_kwargs_func_type,
+ args=[type_arg, args_tuple, kwargs],
+ may_return_none=False,
+ is_temp=True)
+ else:
+ # arbitrary variable, needs a None check for safety
+ type_arg = type_arg.as_none_safe_node(
+ "object.__new__(X): X is not a type object (NoneType)")
+
+ utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
+ if kwargs:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
+ args=[type_arg, args_tuple, kwargs],
+ utility_code=utility_code,
+ is_temp=node.is_temp
+ )
+ else:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
+ args=[type_arg, args_tuple],
+ utility_code=utility_code,
+ is_temp=node.is_temp
+ )
+
+ ### methods of builtin types
+
+ PyObject_Append_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_returncode_type, [
+ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
+ ],
+ exception_value="-1")
+
+ def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
+ """Optimistic optimisation as X.append() is almost always
+ referring to a list.
+ """
+ if len(args) != 2 or node.result_is_used or node.function.entry:
+ return node
+
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
+ args=args,
+ may_return_none=False,
+ is_temp=node.is_temp,
+ result_is_used=False,
+ utility_code=load_c_utility('append')
+ )
+
+ def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
+ """Replace list.extend([...]) for short sequence literals values by sequential appends
+ to avoid creating an intermediate sequence argument.
+ """
+ if len(args) != 2:
+ return node
+ obj, value = args
+ if not value.is_sequence_constructor:
+ return node
+ items = list(value.args)
+ if value.mult_factor is not None or len(items) > 8:
+ # Appending wins for short sequences but slows down when multiple resize operations are needed.
+ # This seems to be a good enough limit that avoids repeated resizing.
+ if False and isinstance(value, ExprNodes.ListNode):
+ # One would expect that tuples are more efficient here, but benchmarking with
+ # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
+ # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
+ # which is probably tuned more towards lists than tuples (and rightly so).
+ tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
+ Visitor.recursively_replace_node(node, args[1], tuple_node)
+ return node
+ wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
+ if not items:
+ # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
+ wrapped_obj.result_is_used = node.result_is_used
+ return wrapped_obj
+ cloned_obj = obj = wrapped_obj
+ if len(items) > 1 and not obj.is_simple():
+ cloned_obj = UtilNodes.LetRefNode(obj)
+ # Use ListComp_Append() for all but the last item and finish with PyList_Append()
+ # to shrink the list storage size at the very end if necessary.
+ temps = []
+ arg = items[-1]
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg],
+ is_temp=True,
+ utility_code=load_c_utility("ListAppend"))
+ for arg in items[-2::-1]:
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.binop_node(
+ node.pos, '|',
+ ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg], py_name="extend",
+ is_temp=True,
+ utility_code=load_c_utility("ListCompAppend")),
+ new_node,
+ type=PyrexTypes.c_returncode_type,
+ )
+ new_node.result_is_used = node.result_is_used
+ if cloned_obj is not obj:
+ temps.append(cloned_obj)
+ for temp in temps:
+ new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
+ new_node.result_is_used = node.result_is_used
+ return new_node
+
+ PyByteArray_Append_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_returncode_type, [
+ PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
+ ],
+ exception_value="-1")
+
+ PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_returncode_type, [
+ PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
+ ],
+ exception_value="-1")
+
+ def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
+ if len(args) != 2:
+ return node
+ func_name = "__Pyx_PyByteArray_Append"
+ func_type = self.PyByteArray_Append_func_type
+
+ value = unwrap_coerced_node(args[1])
+ if value.type.is_int or isinstance(value, ExprNodes.IntNode):
+ value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
+ utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
+ elif value.is_string_literal:
+ if not value.can_coerce_to_char_literal():
+ return node
+ value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
+ utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
+ elif value.type.is_pyobject:
+ func_name = "__Pyx_PyByteArray_AppendObject"
+ func_type = self.PyByteArray_AppendObject_func_type
+ utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
+ else:
+ return node
+
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, func_name, func_type,
+ args=[args[0], value],
+ may_return_none=False,
+ is_temp=node.is_temp,
+ utility_code=utility_code,
+ )
+ if node.result_is_used:
+ new_node = new_node.coerce_to(node.type, self.current_env())
+ return new_node
+
+ PyObject_Pop_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
+ ])
+
+ PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
+ ],
+ has_varargs=True) # to fake the additional macro args that lack a proper C type
+
+ def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
+ return self._handle_simple_method_object_pop(
+ node, function, args, is_unbound_method, is_list=True)
+
+ def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
+ """Optimistic optimisation as X.pop([n]) is almost always
+ referring to a list.
+ """
+ if not args:
+ return node
+ obj = args[0]
+ if is_list:
+ type_name = 'List'
+ obj = obj.as_none_safe_node(
+ "'NoneType' object has no attribute '%.30s'",
+ error="PyExc_AttributeError",
+ format_args=['pop'])
+ else:
+ type_name = 'Object'
+ if len(args) == 1:
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_Py%s_Pop" % type_name,
+ self.PyObject_Pop_func_type,
+ args=[obj],
+ may_return_none=True,
+ is_temp=node.is_temp,
+ utility_code=load_c_utility('pop'),
+ )
+ elif len(args) == 2:
+ index = unwrap_coerced_node(args[1])
+ py_index = ExprNodes.NoneNode(index.pos)
+ orig_index_type = index.type
+ if not index.type.is_int:
+ if isinstance(index, ExprNodes.IntNode):
+ py_index = index.coerce_to_pyobject(self.current_env())
+ index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ elif is_list:
+ if index.type.is_pyobject:
+ py_index = index.coerce_to_simple(self.current_env())
+ index = ExprNodes.CloneNode(py_index)
+ index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ else:
+ return node
+ elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
+ return node
+ elif isinstance(index, ExprNodes.IntNode):
+ py_index = index.coerce_to_pyobject(self.current_env())
+ # real type might still be larger at runtime
+ if not orig_index_type.is_int:
+ orig_index_type = index.type
+ if not orig_index_type.create_to_py_utility_code(self.current_env()):
+ return node
+ convert_func = orig_index_type.to_py_function
+ conversion_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_Py%s_PopIndex" % type_name,
+ self.PyObject_PopIndex_func_type,
+ args=[obj, py_index, index,
+ ExprNodes.IntNode(index.pos, value=str(1 if orig_index_type.signed else 0),
+ constant_result=1 if orig_index_type.signed else 0,
+ type=PyrexTypes.c_int_type),
+ ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
+ orig_index_type.empty_declaration_code()),
+ ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
+ may_return_none=True,
+ is_temp=node.is_temp,
+ utility_code=load_c_utility("pop_index"),
+ )
+
+ return node
+
+ single_param_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_returncode_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
+ ],
+ exception_value = "-1")
+
+ def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
+ """Call PyList_Sort() instead of the 0-argument l.sort().
+ """
+ if len(args) != 1:
+ return node
+ return self._substitute_method_call(
+ node, function, "PyList_Sort", self.single_param_func_type,
+ 'sort', is_unbound_method, args).coerce_to(node.type, self.current_env())
+
+ Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
+ """Replace dict.get() by a call to PyDict_GetItem().
+ """
+ if len(args) == 2:
+ args.append(ExprNodes.NoneNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
+ return node
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
+ 'get', is_unbound_method, args,
+ may_return_none = True,
+ utility_code = load_c_utility("dict_getitem_default"))
+
+ Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
+ ])
+
+ def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
+ """Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
+ """
+ if len(args) == 2:
+ args.append(ExprNodes.NoneNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
+ return node
+ key_type = args[1].type
+ if key_type.is_builtin_type:
+ is_safe_type = int(key_type.name in
+ 'str bytes unicode float int long bool')
+ elif key_type is PyrexTypes.py_object_type:
+ is_safe_type = -1 # don't know
+ else:
+ is_safe_type = 0 # definitely not
+ args.append(ExprNodes.IntNode(
+ node.pos, value=str(is_safe_type), constant_result=is_safe_type))
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
+ 'setdefault', is_unbound_method, args,
+ may_return_none=True,
+ utility_code=load_c_utility('dict_setdefault'))
+
+ PyDict_Pop_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
+ """Replace dict.pop() by a call to _PyDict_Pop().
+ """
+ if len(args) == 2:
+ args.append(ExprNodes.NullNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
+ return node
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
+ 'pop', is_unbound_method, args,
+ may_return_none=True,
+ utility_code=load_c_utility('py_dict_pop'))
+
+ Pyx_BinopInt_func_types = dict(
+ ((ctype, ret_type), PyrexTypes.CFuncType(
+ ret_type, [
+ PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("cval", ctype, None),
+ PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
+ PyrexTypes.CFuncTypeArg("zerodiv_check", PyrexTypes.c_bint_type, None),
+ ], exception_value=None if ret_type.is_pyobject else ret_type.exception_value))
+ for ctype in (PyrexTypes.c_long_type, PyrexTypes.c_double_type)
+ for ret_type in (PyrexTypes.py_object_type, PyrexTypes.c_bint_type)
+ )
+
+ def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___ne__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('And', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Or', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Xor', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
+ if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
+ return node
+ if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
+ return node
+ return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method):
+ if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
+ return node
+ if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
+ return node
+ return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_div('Remainder', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_div('Divide', node, function, args, is_unbound_method)
+
+ def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
+ if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0:
+ return node
+ if isinstance(args[1], ExprNodes.IntNode):
+ if not (-2**30 <= args[1].constant_result <= 2**30):
+ return node
+ elif isinstance(args[1], ExprNodes.FloatNode):
+ if not (-2**53 <= args[1].constant_result <= 2**53):
+ return node
+ else:
+ return node
+ return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Divide', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
+
+ def _handle_simple_method_float___ne__(self, node, function, args, is_unbound_method):
+ return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
+
+ def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
+ """
+ Optimise math operators for (likely) float or small integer operations.
+ """
+ if len(args) != 2:
+ return node
+
+ if node.type.is_pyobject:
+ ret_type = PyrexTypes.py_object_type
+ elif node.type is PyrexTypes.c_bint_type and operator in ('Eq', 'Ne'):
+ ret_type = PyrexTypes.c_bint_type
+ else:
+ return node
+
+ # When adding IntNode/FloatNode to something else, assume other operand is also numeric.
+ # Prefer constants on RHS as they allow better size control for some operators.
+ num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
+ if isinstance(args[1], num_nodes):
+ if args[0].type is not PyrexTypes.py_object_type:
+ return node
+ numval = args[1]
+ arg_order = 'ObjC'
+ elif isinstance(args[0], num_nodes):
+ if args[1].type is not PyrexTypes.py_object_type:
+ return node
+ numval = args[0]
+ arg_order = 'CObj'
+ else:
+ return node
+
+ if not numval.has_constant_result():
+ return node
+
+ is_float = isinstance(numval, ExprNodes.FloatNode)
+ num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type
+ if is_float:
+ if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
+ return node
+ elif operator == 'Divide':
+ # mixed old-/new-style division is not currently optimised for integers
+ return node
+ elif abs(numval.constant_result) > 2**30:
+ # Cut off at an integer border that is still safe for all operations.
+ return node
+
+ if operator in ('TrueDivide', 'FloorDivide', 'Divide', 'Remainder'):
+ if args[1].constant_result == 0:
+ # Don't optimise division by 0. :)
+ return node
+
+ args = list(args)
+ args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
+ numval.pos, value=numval.value, constant_result=numval.constant_result,
+ type=num_type))
+ inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
+ args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace))
+ if is_float or operator not in ('Eq', 'Ne'):
+ # "PyFloatBinop" and "PyIntBinop" take an additional "check for zero division" argument.
+ zerodivision_check = arg_order == 'CObj' and (
+ not node.cdivision if isinstance(node, ExprNodes.DivNode) else False)
+ args.append(ExprNodes.BoolNode(node.pos, value=zerodivision_check, constant_result=zerodivision_check))
+
+ utility_code = TempitaUtilityCode.load_cached(
+ "PyFloatBinop" if is_float else "PyIntCompare" if operator in ('Eq', 'Ne') else "PyIntBinop",
+ "Optimize.c",
+ context=dict(op=operator, order=arg_order, ret_type=ret_type))
+
+ call_node = self._substitute_method_call(
+ node, function,
+ "__Pyx_Py%s_%s%s%s" % (
+ 'Float' if is_float else 'Int',
+ '' if ret_type.is_pyobject else 'Bool',
+ operator,
+ arg_order),
+ self.Pyx_BinopInt_func_types[(num_type, ret_type)],
+ '__%s__' % operator[:3].lower(), is_unbound_method, args,
+ may_return_none=True,
+ with_none_check=False,
+ utility_code=utility_code)
+
+ if node.type.is_pyobject and not ret_type.is_pyobject:
+ call_node = ExprNodes.CoerceToPyTypeNode(call_node, self.current_env(), node.type)
+ return call_node
+
+ ### unicode type methods
+
+ PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_bint_type, [
+ PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
+ ])
+
+ def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
+ if is_unbound_method or len(args) != 1:
+ return node
+ ustring = args[0]
+ if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
+ not ustring.arg.type.is_unicode_char:
+ return node
+ uchar = ustring.arg
+ method_name = function.attribute
+ if method_name == 'istitle':
+ # istitle() doesn't directly map to Py_UNICODE_ISTITLE()
+ utility_code = UtilityCode.load_cached(
+ "py_unicode_istitle", "StringTools.c")
+ function_name = '__Pyx_Py_UNICODE_ISTITLE'
+ else:
+ utility_code = None
+ function_name = 'Py_UNICODE_%s' % method_name.upper()
+ func_call = self._substitute_method_call(
+ node, function,
+ function_name, self.PyUnicode_uchar_predicate_func_type,
+ method_name, is_unbound_method, [uchar],
+ utility_code = utility_code)
+ if node.type.is_pyobject:
+            func_call = func_call.coerce_to_pyobject(self.current_env())
+ return func_call
+
+ _handle_simple_method_unicode_isalnum = _inject_unicode_predicate
+ _handle_simple_method_unicode_isalpha = _inject_unicode_predicate
+ _handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
+ _handle_simple_method_unicode_isdigit = _inject_unicode_predicate
+ _handle_simple_method_unicode_islower = _inject_unicode_predicate
+ _handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
+ _handle_simple_method_unicode_isspace = _inject_unicode_predicate
+ _handle_simple_method_unicode_istitle = _inject_unicode_predicate
+ _handle_simple_method_unicode_isupper = _inject_unicode_predicate
+
+ PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ucs4_type, [
+ PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
+ ])
+
+ def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
+ if is_unbound_method or len(args) != 1:
+ return node
+ ustring = args[0]
+ if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
+ not ustring.arg.type.is_unicode_char:
+ return node
+ uchar = ustring.arg
+ method_name = function.attribute
+ function_name = 'Py_UNICODE_TO%s' % method_name.upper()
+ func_call = self._substitute_method_call(
+ node, function,
+ function_name, self.PyUnicode_uchar_conversion_func_type,
+ method_name, is_unbound_method, [uchar])
+ if node.type.is_pyobject:
+            func_call = func_call.coerce_to_pyobject(self.current_env())
+ return func_call
+
+ _handle_simple_method_unicode_lower = _inject_unicode_character_conversion
+ _handle_simple_method_unicode_upper = _inject_unicode_character_conversion
+ _handle_simple_method_unicode_title = _inject_unicode_character_conversion
+
+ PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
+ Builtin.list_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
+ ])
+
+ def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
+ """Replace unicode.splitlines(...) by a direct call to the
+ corresponding C-API function.
+ """
+ if len(args) not in (1,2):
+ self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
+ return node
+ self._inject_bint_default_argument(node, args, 1, False)
+
+ return self._substitute_method_call(
+ node, function,
+ "PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
+ 'splitlines', is_unbound_method, args)
+
+ PyUnicode_Split_func_type = PyrexTypes.CFuncType(
+ Builtin.list_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
+ ]
+ )
+
+ def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
+ """Replace unicode.split(...) by a direct call to the
+ corresponding C-API function.
+ """
+ if len(args) not in (1,2,3):
+ self._error_wrong_arg_count('unicode.split', node, args, "1-3")
+ return node
+ if len(args) < 2:
+ args.append(ExprNodes.NullNode(node.pos))
+ self._inject_int_default_argument(
+ node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
+
+ return self._substitute_method_call(
+ node, function,
+ "PyUnicode_Split", self.PyUnicode_Split_func_type,
+ 'split', is_unbound_method, args)
+
+ PyUnicode_Join_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method):
+ """
+ unicode.join() builds a list first => see if we can do this more efficiently
+ """
+ if len(args) != 2:
+ self._error_wrong_arg_count('unicode.join', node, args, "2")
+ return node
+ if isinstance(args[1], ExprNodes.GeneratorExpressionNode):
+ gen_expr_node = args[1]
+ loop_node = gen_expr_node.loop
+
+ yield_statements = _find_yield_statements(loop_node)
+ if yield_statements:
+ inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode(
+ node.pos, gen_expr_node, orig_func='list',
+ comprehension_type=Builtin.list_type)
+
+ for yield_expression, yield_stat_node in yield_statements:
+ append_node = ExprNodes.ComprehensionAppendNode(
+ yield_expression.pos,
+ expr=yield_expression,
+ target=inlined_genexpr.target)
+
+ Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
+
+ args[1] = inlined_genexpr
+
+ return self._substitute_method_call(
+ node, function,
+ "PyUnicode_Join", self.PyUnicode_Join_func_type,
+ 'join', is_unbound_method, args)
+
+ PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_bint_type, [
+ PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
+ PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
+ ],
+ exception_value = '-1')
+
+ def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'unicode', 'endswith',
+ unicode_tailmatch_utility_code, +1)
+
+ def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'unicode', 'startswith',
+ unicode_tailmatch_utility_code, -1)
+
+ def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
+ method_name, utility_code, direction):
+ """Replace unicode.startswith(...) and unicode.endswith(...)
+ by a direct call to the corresponding C-API function.
+ """
+ if len(args) not in (2,3,4):
+ self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
+ return node
+ self._inject_int_default_argument(
+ node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
+ self._inject_int_default_argument(
+ node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
+ args.append(ExprNodes.IntNode(
+ node.pos, value=str(direction), type=PyrexTypes.c_int_type))
+
+ method_call = self._substitute_method_call(
+ node, function,
+ "__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
+ self.PyString_Tailmatch_func_type,
+ method_name, is_unbound_method, args,
+ utility_code = utility_code)
+ return method_call.coerce_to(Builtin.bool_type, self.current_env())
+
+ PyUnicode_Find_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ssize_t_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
+ ],
+ exception_value = '-2')
+
+ def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
+ return self._inject_unicode_find(
+ node, function, args, is_unbound_method, 'find', +1)
+
+ def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
+ return self._inject_unicode_find(
+ node, function, args, is_unbound_method, 'rfind', -1)
+
+ def _inject_unicode_find(self, node, function, args, is_unbound_method,
+ method_name, direction):
+ """Replace unicode.find(...) and unicode.rfind(...) by a
+ direct call to the corresponding C-API function.
+ """
+ if len(args) not in (2,3,4):
+ self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
+ return node
+ self._inject_int_default_argument(
+ node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
+ self._inject_int_default_argument(
+ node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
+ args.append(ExprNodes.IntNode(
+ node.pos, value=str(direction), type=PyrexTypes.c_int_type))
+
+ method_call = self._substitute_method_call(
+ node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
+ method_name, is_unbound_method, args)
+ return method_call.coerce_to_pyobject(self.current_env())
+
+ PyUnicode_Count_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_py_ssize_t_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
+ ],
+ exception_value = '-1')
+
+ def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
+ """Replace unicode.count(...) by a direct call to the
+ corresponding C-API function.
+ """
+ if len(args) not in (2,3,4):
+ self._error_wrong_arg_count('unicode.count', node, args, "2-4")
+ return node
+ self._inject_int_default_argument(
+ node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
+ self._inject_int_default_argument(
+ node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
+
+ method_call = self._substitute_method_call(
+ node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
+ 'count', is_unbound_method, args)
+ return method_call.coerce_to_pyobject(self.current_env())
+
+ PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
+ ])
+
+ def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
+ """Replace unicode.replace(...) by a direct call to the
+ corresponding C-API function.
+ """
+ if len(args) not in (3,4):
+ self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
+ return node
+ self._inject_int_default_argument(
+ node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
+
+ return self._substitute_method_call(
+ node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
+ 'replace', is_unbound_method, args)
+
+ PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
+ Builtin.bytes_type, [
+ PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
+ PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
+ ])
+
+ PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
+ Builtin.bytes_type, [
+ PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
+ ])
+
+ _special_encodings = ['UTF8', 'UTF16', 'UTF-16LE', 'UTF-16BE', 'Latin1', 'ASCII',
+ 'unicode_escape', 'raw_unicode_escape']
+
+ _special_codecs = [ (name, codecs.getencoder(name))
+ for name in _special_encodings ]
+
+ def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
+ """Replace unicode.encode(...) by a direct C-API call to the
+ corresponding codec.
+ """
+ if len(args) < 1 or len(args) > 3:
+ self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
+ return node
+
+ string_node = args[0]
+
+ if len(args) == 1:
+ null_node = ExprNodes.NullNode(node.pos)
+ return self._substitute_method_call(
+ node, function, "PyUnicode_AsEncodedString",
+ self.PyUnicode_AsEncodedString_func_type,
+ 'encode', is_unbound_method, [string_node, null_node, null_node])
+
+ parameters = self._unpack_encoding_and_error_mode(node.pos, args)
+ if parameters is None:
+ return node
+ encoding, encoding_node, error_handling, error_handling_node = parameters
+
+ if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
+ # constant, so try to do the encoding at compile time
+ try:
+ value = string_node.value.encode(encoding, error_handling)
+            except Exception:
+ # well, looks like we can't
+ pass
+ else:
+ value = bytes_literal(value, encoding)
+ return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type)
+
+ if encoding and error_handling == 'strict':
+ # try to find a specific encoder function
+ codec_name = self._find_special_codec_name(encoding)
+ if codec_name is not None and '-' not in codec_name:
+ encode_function = "PyUnicode_As%sString" % codec_name
+ return self._substitute_method_call(
+ node, function, encode_function,
+ self.PyUnicode_AsXyzString_func_type,
+ 'encode', is_unbound_method, [string_node])
+
+ return self._substitute_method_call(
+ node, function, "PyUnicode_AsEncodedString",
+ self.PyUnicode_AsEncodedString_func_type,
+ 'encode', is_unbound_method,
+ [string_node, encoding_node, error_handling_node])
+
+ PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
+ ]))
+
+ _decode_c_string_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
+ ])
+
+ _decode_bytes_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
+ ])
+
+ _decode_cpp_string_func_type = None # lazy init
+
+ def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
+ """Replace char*.decode() by a direct C-API call to the
+ corresponding codec, possibly resolving a slice on the char*.
+ """
+ if not (1 <= len(args) <= 3):
+ self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
+ return node
+
+ # normalise input nodes
+ string_node = args[0]
+ start = stop = None
+ if isinstance(string_node, ExprNodes.SliceIndexNode):
+ index_node = string_node
+ string_node = index_node.base
+ start, stop = index_node.start, index_node.stop
+ if not start or start.constant_result == 0:
+ start = None
+ if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
+ string_node = string_node.arg
+
+ string_type = string_node.type
+ if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
+ if is_unbound_method:
+ string_node = string_node.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=['decode', string_type.name])
+ else:
+ string_node = string_node.as_none_safe_node(
+ "'NoneType' object has no attribute '%.30s'",
+ error="PyExc_AttributeError",
+ format_args=['decode'])
+ elif not string_type.is_string and not string_type.is_cpp_string:
+ # nothing to optimise here
+ return node
+
+ parameters = self._unpack_encoding_and_error_mode(node.pos, args)
+ if parameters is None:
+ return node
+ encoding, encoding_node, error_handling, error_handling_node = parameters
+
+ if not start:
+ start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
+ elif not start.type.is_int:
+ start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ if stop and not stop.type.is_int:
+ stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+
+ # try to find a specific encoder function
+ codec_name = None
+ if encoding is not None:
+ codec_name = self._find_special_codec_name(encoding)
+ if codec_name is not None:
+ if codec_name in ('UTF16', 'UTF-16LE', 'UTF-16BE'):
+ codec_cname = "__Pyx_PyUnicode_Decode%s" % codec_name.replace('-', '')
+ else:
+ codec_cname = "PyUnicode_Decode%s" % codec_name
+ decode_function = ExprNodes.RawCNameExprNode(
+ node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname=codec_cname)
+ encoding_node = ExprNodes.NullNode(node.pos)
+ else:
+ decode_function = ExprNodes.NullNode(node.pos)
+
+ # build the helper function call
+ temps = []
+ if string_type.is_string:
+ # C string
+ if not stop:
+ # use strlen() to find the string length, just as CPython would
+ if not string_node.is_name:
+ string_node = UtilNodes.LetRefNode(string_node) # used twice
+ temps.append(string_node)
+ stop = ExprNodes.PythonCapiCallNode(
+ string_node.pos, "strlen", self.Pyx_strlen_func_type,
+ args=[string_node],
+ is_temp=False,
+ utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
+ ).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
+ helper_func_type = self._decode_c_string_func_type
+ utility_code_name = 'decode_c_string'
+ elif string_type.is_cpp_string:
+ # C++ std::string
+ if not stop:
+ stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
+ constant_result=ExprNodes.not_a_constant)
+ if self._decode_cpp_string_func_type is None:
+ # lazy init to reuse the C++ string type
+ self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("string", string_type, None),
+ PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
+ PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
+ ])
+ helper_func_type = self._decode_cpp_string_func_type
+ utility_code_name = 'decode_cpp_string'
+ else:
+ # Python bytes/bytearray object
+ if not stop:
+ stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
+ constant_result=ExprNodes.not_a_constant)
+ helper_func_type = self._decode_bytes_func_type
+ if string_type is Builtin.bytes_type:
+ utility_code_name = 'decode_bytes'
+ else:
+ utility_code_name = 'decode_bytearray'
+
+ node = ExprNodes.PythonCapiCallNode(
+ node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
+ args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
+ is_temp=node.is_temp,
+ utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
+ )
+
+ for temp in temps[::-1]:
+ node = UtilNodes.EvalWithTempExprNode(temp, node)
+ return node
+
+ _handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
+
+ def _find_special_codec_name(self, encoding):
+ try:
+ requested_codec = codecs.getencoder(encoding)
+ except LookupError:
+ return None
+ for name, codec in self._special_codecs:
+ if codec == requested_codec:
+ if '_' in name:
+ name = ''.join([s.capitalize()
+ for s in name.split('_')])
+ return name
+ return None
+
+ def _unpack_encoding_and_error_mode(self, pos, args):
+ null_node = ExprNodes.NullNode(pos)
+
+ if len(args) >= 2:
+ encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
+ if encoding_node is None:
+ return None
+ else:
+ encoding = None
+ encoding_node = null_node
+
+ if len(args) == 3:
+ error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
+ if error_handling_node is None:
+ return None
+ if error_handling == 'strict':
+ error_handling_node = null_node
+ else:
+ error_handling = 'strict'
+ error_handling_node = null_node
+
+ return (encoding, encoding_node, error_handling, error_handling_node)
+
+ def _unpack_string_and_cstring_node(self, node):
+ if isinstance(node, ExprNodes.CoerceToPyTypeNode):
+ node = node.arg
+ if isinstance(node, ExprNodes.UnicodeNode):
+ encoding = node.value
+ node = ExprNodes.BytesNode(
+ node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type)
+ elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
+ encoding = node.value.decode('ISO-8859-1')
+ node = ExprNodes.BytesNode(
+ node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type)
+ elif node.type is Builtin.bytes_type:
+ encoding = None
+ node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env())
+ elif node.type.is_string:
+ encoding = None
+ else:
+ encoding = node = None
+ return encoding, node
+
+ def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'str', 'endswith',
+ str_tailmatch_utility_code, +1)
+
+ def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'str', 'startswith',
+ str_tailmatch_utility_code, -1)
+
+ def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'bytes', 'endswith',
+ bytes_tailmatch_utility_code, +1)
+
+ def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'bytes', 'startswith',
+ bytes_tailmatch_utility_code, -1)
+
+ ''' # disabled for now, enable when we consider it worth it (see StringTools.c)
+ def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'bytearray', 'endswith',
+ bytes_tailmatch_utility_code, +1)
+
+ def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
+ return self._inject_tailmatch(
+ node, function, args, is_unbound_method, 'bytearray', 'startswith',
+ bytes_tailmatch_utility_code, -1)
+ '''
+
+ ### helpers
+
+ def _substitute_method_call(self, node, function, name, func_type,
+ attr_name, is_unbound_method, args=(),
+ utility_code=None, is_temp=None,
+ may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
+ with_none_check=True):
+ args = list(args)
+ if with_none_check and args:
+ args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
+ if is_temp is None:
+ is_temp = node.is_temp
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, name, func_type,
+ args = args,
+ is_temp = is_temp,
+ utility_code = utility_code,
+ may_return_none = may_return_none,
+ result_is_used = node.result_is_used,
+ )
+
+ def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
+ if self_arg.is_literal:
+ return self_arg
+ if is_unbound_method:
+ self_arg = self_arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[attr_name, self_arg.type.name])
+ else:
+ self_arg = self_arg.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
+ error="PyExc_AttributeError",
+ format_args=[attr_name])
+ return self_arg
+
+ def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
+ assert len(args) >= arg_index
+ if len(args) == arg_index:
+ args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
+ type=type, constant_result=default_value))
+ else:
+ args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
+
+ def _inject_bint_default_argument(self, node, args, arg_index, default_value):
+ assert len(args) >= arg_index
+ if len(args) == arg_index:
+ default_value = bool(default_value)
+ args.append(ExprNodes.BoolNode(node.pos, value=default_value,
+ constant_result=default_value))
+ else:
+ args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
+
+
+unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
+bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
+str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
+
+
+class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
+ """Calculate the result of constant expressions to store it in
+ ``expr_node.constant_result``, and replace trivial cases by their
+ constant result.
+
+ General rules:
+
+ - We calculate float constants to make them available to the
+ compiler, but we do not aggregate them into a single literal
+ node to prevent any loss of precision.
+
+ - We recursively calculate constants from non-literal nodes to
+ make them available to the compiler, but we only aggregate
+ literal nodes at each step. Non-literal nodes are never merged
+ into a single node.
+ """
+
+ def __init__(self, reevaluate=False):
+ """
+ The reevaluate argument specifies whether constant values that were
+ previously computed should be recomputed.
+ """
+ super(ConstantFolding, self).__init__()
+ self.reevaluate = reevaluate
+
+ def _calculate_const(self, node):
+ if (not self.reevaluate and
+ node.constant_result is not ExprNodes.constant_value_not_set):
+ return
+
+ # make sure we always set the value
+ not_a_constant = ExprNodes.not_a_constant
+ node.constant_result = not_a_constant
+
+ # check if all children are constant
+ children = self.visitchildren(node)
+ for child_result in children.values():
+ if type(child_result) is list:
+ for child in child_result:
+ if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
+ return
+ elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
+ return
+
+ # now try to calculate the real constant value
+ try:
+ node.calculate_constant_result()
+# if node.constant_result is not ExprNodes.not_a_constant:
+# print node.__class__.__name__, node.constant_result
+ except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
+ # ignore all 'normal' errors here => no constant result
+ pass
+ except Exception:
+ # this looks like a real error
+ import traceback, sys
+ traceback.print_exc(file=sys.stdout)
+
+ NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
+ ExprNodes.IntNode, ExprNodes.FloatNode]
+
+ def _widest_node_class(self, *nodes):
+ try:
+ return self.NODE_TYPE_ORDER[
+ max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
+ except ValueError:
+ return None
+
+ def _bool_node(self, node, value):
+ value = bool(value)
+ return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)
+
+ def visit_ExprNode(self, node):
+ self._calculate_const(node)
+ return node
+
+ def visit_UnopNode(self, node):
+ self._calculate_const(node)
+ if not node.has_constant_result():
+ if node.operator == '!':
+ return self._handle_NotNode(node)
+ return node
+ if not node.operand.is_literal:
+ return node
+ if node.operator == '!':
+ return self._bool_node(node, node.constant_result)
+ elif isinstance(node.operand, ExprNodes.BoolNode):
+ return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
+ type=PyrexTypes.c_int_type,
+ constant_result=int(node.constant_result))
+ elif node.operator == '+':
+ return self._handle_UnaryPlusNode(node)
+ elif node.operator == '-':
+ return self._handle_UnaryMinusNode(node)
+ return node
+
+ _negate_operator = {
+ 'in': 'not_in',
+ 'not_in': 'in',
+ 'is': 'is_not',
+ 'is_not': 'is'
+ }.get
+
+ def _handle_NotNode(self, node):
+ operand = node.operand
+ if isinstance(operand, ExprNodes.PrimaryCmpNode):
+ operator = self._negate_operator(operand.operator)
+ if operator:
+ node = copy.copy(operand)
+ node.operator = operator
+ node = self.visit_PrimaryCmpNode(node)
+ return node
+
+ def _handle_UnaryMinusNode(self, node):
+ def _negate(value):
+ if value.startswith('-'):
+ value = value[1:]
+ else:
+ value = '-' + value
+ return value
+
+ node_type = node.operand.type
+ if isinstance(node.operand, ExprNodes.FloatNode):
+ # this is a safe operation
+ return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
+ type=node_type,
+ constant_result=node.constant_result)
+ if node_type.is_int and node_type.signed or \
+ isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject:
+ return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
+ type=node_type,
+ longness=node.operand.longness,
+ constant_result=node.constant_result)
+ return node
+
+ def _handle_UnaryPlusNode(self, node):
+ if (node.operand.has_constant_result() and
+ node.constant_result == node.operand.constant_result):
+ return node.operand
+ return node
+
+ def visit_BoolBinopNode(self, node):
+ self._calculate_const(node)
+ if not node.operand1.has_constant_result():
+ return node
+ if node.operand1.constant_result:
+ if node.operator == 'and':
+ return node.operand2
+ else:
+ return node.operand1
+ else:
+ if node.operator == 'and':
+ return node.operand1
+ else:
+ return node.operand2
+
+ def visit_BinopNode(self, node):
+ self._calculate_const(node)
+ if node.constant_result is ExprNodes.not_a_constant:
+ return node
+ if isinstance(node.constant_result, float):
+ return node
+ operand1, operand2 = node.operand1, node.operand2
+ if not operand1.is_literal or not operand2.is_literal:
+ return node
+
+ # now inject a new constant node with the calculated value
+ try:
+ type1, type2 = operand1.type, operand2.type
+ if type1 is None or type2 is None:
+ return node
+ except AttributeError:
+ return node
+
+ if type1.is_numeric and type2.is_numeric:
+ widest_type = PyrexTypes.widest_numeric_type(type1, type2)
+ else:
+ widest_type = PyrexTypes.py_object_type
+
+ target_class = self._widest_node_class(operand1, operand2)
+ if target_class is None:
+ return node
+ elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
+ # C arithmetic results in at least an int type
+ target_class = ExprNodes.IntNode
+ elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
+ # C arithmetic results in at least an int type
+ target_class = ExprNodes.IntNode
+
+ if target_class is ExprNodes.IntNode:
+ unsigned = getattr(operand1, 'unsigned', '') and \
+ getattr(operand2, 'unsigned', '')
+ longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
+ len(getattr(operand2, 'longness', '')))]
+ new_node = ExprNodes.IntNode(pos=node.pos,
+ unsigned=unsigned, longness=longness,
+ value=str(int(node.constant_result)),
+ constant_result=int(node.constant_result))
+ # IntNode is smart about the type it chooses, so we just
+ # make sure we were not smarter this time
+ if widest_type.is_pyobject or new_node.type.is_pyobject:
+ new_node.type = PyrexTypes.py_object_type
+ else:
+ new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
+ else:
+ if target_class is ExprNodes.BoolNode:
+ node_value = node.constant_result
+ else:
+ node_value = str(node.constant_result)
+ new_node = target_class(pos=node.pos, type = widest_type,
+ value = node_value,
+ constant_result = node.constant_result)
+ return new_node
+
+ def visit_AddNode(self, node):
+ self._calculate_const(node)
+ if node.constant_result is ExprNodes.not_a_constant:
+ return node
+ if node.operand1.is_string_literal and node.operand2.is_string_literal:
+ # some people combine string literals with a '+'
+ str1, str2 = node.operand1, node.operand2
+ if isinstance(str1, ExprNodes.UnicodeNode) and isinstance(str2, ExprNodes.UnicodeNode):
+ bytes_value = None
+ if str1.bytes_value is not None and str2.bytes_value is not None:
+ if str1.bytes_value.encoding == str2.bytes_value.encoding:
+ bytes_value = bytes_literal(
+ str1.bytes_value + str2.bytes_value,
+ str1.bytes_value.encoding)
+ string_value = EncodedString(node.constant_result)
+ return ExprNodes.UnicodeNode(
+ str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value)
+ elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode):
+ if str1.value.encoding == str2.value.encoding:
+ bytes_value = bytes_literal(node.constant_result, str1.value.encoding)
+ return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result)
+ # all other combinations are rather complicated
+ # to get right in Py2/3: encodings, unicode escapes, ...
+ return self.visit_BinopNode(node)
+
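+    # Illustrative sketch: "ab" * 3 folds to the literal "ababab" (up to the
+    # 256-character limit enforced below), and [0] * 2 becomes a sequence
+    # constructor with a constant mult_factor instead of a runtime multiply.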
+ def visit_MulNode(self, node):
+ self._calculate_const(node)
+ if node.operand1.is_sequence_constructor:
+ return self._calculate_constant_seq(node, node.operand1, node.operand2)
+ if isinstance(node.operand1, ExprNodes.IntNode) and \
+ node.operand2.is_sequence_constructor:
+ return self._calculate_constant_seq(node, node.operand2, node.operand1)
+ if node.operand1.is_string_literal:
+ return self._multiply_string(node, node.operand1, node.operand2)
+ elif node.operand2.is_string_literal:
+ return self._multiply_string(node, node.operand2, node.operand1)
+ return self.visit_BinopNode(node)
+
+ def _multiply_string(self, node, string_node, multiplier_node):
+ multiplier = multiplier_node.constant_result
+ if not isinstance(multiplier, _py_int_types):
+ return node
+ if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
+ return node
+ if len(node.constant_result) > 256:
+ # Too long for static creation, leave it to runtime. (-> arbitrary limit)
+ return node
+
+ build_string = encoded_string
+ if isinstance(string_node, ExprNodes.BytesNode):
+ build_string = bytes_literal
+ elif isinstance(string_node, ExprNodes.StringNode):
+ if string_node.unicode_value is not None:
+ string_node.unicode_value = encoded_string(
+ string_node.unicode_value * multiplier,
+ string_node.unicode_value.encoding)
+ build_string = encoded_string if string_node.value.is_unicode else bytes_literal
+ elif isinstance(string_node, ExprNodes.UnicodeNode):
+ if string_node.bytes_value is not None:
+ string_node.bytes_value = bytes_literal(
+ string_node.bytes_value * multiplier,
+ string_node.bytes_value.encoding)
+ else:
+ assert False, "unknown string node type: %s" % type(string_node)
+ string_node.value = build_string(
+ string_node.value * multiplier,
+ string_node.value.encoding)
+ # follow constant-folding and use unicode_value in preference
+ if isinstance(string_node, ExprNodes.StringNode) and string_node.unicode_value is not None:
+ string_node.constant_result = string_node.unicode_value
+ else:
+ string_node.constant_result = string_node.value
+ return string_node
+
+ def _calculate_constant_seq(self, node, sequence_node, factor):
+ if factor.constant_result != 1 and sequence_node.args:
+ if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
+ del sequence_node.args[:]
+ sequence_node.mult_factor = None
+ elif sequence_node.mult_factor is not None:
+ if (isinstance(factor.constant_result, _py_int_types) and
+ isinstance(sequence_node.mult_factor.constant_result, _py_int_types)):
+ value = sequence_node.mult_factor.constant_result * factor.constant_result
+ sequence_node.mult_factor = ExprNodes.IntNode(
+ sequence_node.mult_factor.pos,
+ value=str(value), constant_result=value)
+ else:
+ # don't know if we can combine the factors, so don't
+ return self.visit_BinopNode(node)
+ else:
+ sequence_node.mult_factor = factor
+ return sequence_node
+
+ def visit_ModNode(self, node):
+ self.visitchildren(node)
+ if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
+ if not node.operand2.mult_factor:
+ fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
+ if fstring is not None:
+ return fstring
+ return self.visit_BinopNode(node)
+
+ _parse_string_format_regex = (
+ u'(%(?:' # %...
+ u'(?:[-0-9]+|[ ])?' # width (optional) or space prefix fill character (optional)
+ u'(?:[.][0-9]+)?' # precision (optional)
+ u')?.)' # format type (or something different for unsupported formats)
+ )
+
+ def _build_fstring(self, pos, ustring, format_args):
+ # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
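+        # Illustrative sketch: u"%s-%d" % (a, b) is rebuilt as the equivalent
+        # of f"{a}-{b:d}", i.e. a JoinedStrNode of UnicodeNodes and
+        # FormattedValueNodes, provided all placeholders are supported.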
+ args = iter(format_args)
+ substrings = []
+ can_be_optimised = True
+ for s in re.split(self._parse_string_format_regex, ustring):
+ if not s:
+ continue
+ if s == u'%%':
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
+ continue
+ if s[0] != u'%':
+ if s[-1] == u'%':
+ warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
+ can_be_optimised = False
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
+ continue
+ format_type = s[-1]
+ try:
+ arg = next(args)
+ except StopIteration:
+ warning(pos, "Too few arguments for format placeholders", level=1)
+ can_be_optimised = False
+ break
+ if arg.is_starred:
+ can_be_optimised = False
+ break
+ if format_type in u'asrfdoxX':
+ format_spec = s[1:]
+ conversion_char = None
+ if format_type in u'doxX' and u'.' in format_spec:
+ # Precision is not allowed for integers in format(), but ok in %-formatting.
+ can_be_optimised = False
+ elif format_type in u'ars':
+ format_spec = format_spec[:-1]
+ conversion_char = format_type
+ if format_spec.startswith('0'):
+ format_spec = '>' + format_spec[1:] # right-alignment '%05s' spells '{:>5}'
+ elif format_type == u'd':
+ # '%d' formatting supports float, but '{obj:d}' does not => convert to int first.
+ conversion_char = 'd'
+
+ if format_spec.startswith('-'):
+ format_spec = '<' + format_spec[1:] # left-alignment '%-5s' spells '{:<5}'
+
+ substrings.append(ExprNodes.FormattedValueNode(
+ arg.pos, value=arg,
+ conversion_char=conversion_char,
+ format_spec=ExprNodes.UnicodeNode(
+ pos, value=EncodedString(format_spec), constant_result=format_spec)
+ if format_spec else None,
+ ))
+ else:
+ # keep it simple for now ...
+ can_be_optimised = False
+ break
+
+ if not can_be_optimised:
+ # Print all warnings we can find before finally giving up here.
+ return None
+
+ try:
+ next(args)
+ except StopIteration: pass
+ else:
+ warning(pos, "Too many arguments for format placeholders", level=1)
+ return None
+
+ node = ExprNodes.JoinedStrNode(pos, values=substrings)
+ return self.visit_JoinedStrNode(node)
+
+ def visit_FormattedValueNode(self, node):
+ self.visitchildren(node)
+ conversion_char = node.conversion_char or 's'
+ if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value:
+ node.format_spec = None
+ if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode):
+ value = EncodedString(node.value.value)
+ if value.isdigit():
+ return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
+ if node.format_spec is None and conversion_char == 's':
+ value = None
+ if isinstance(node.value, ExprNodes.UnicodeNode):
+ value = node.value.value
+ elif isinstance(node.value, ExprNodes.StringNode):
+ value = node.value.unicode_value
+ if value is not None:
+ return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
+ return node
+
+ def visit_JoinedStrNode(self, node):
+ """
+ Clean up after the parser by discarding empty Unicode strings and merging
+ substring sequences. Empty or single-value join lists are not uncommon
+ because f-string format specs are always parsed into JoinedStrNodes.
+ """
+ self.visitchildren(node)
+ unicode_node = ExprNodes.UnicodeNode
+
+ values = []
+ for is_unode_group, substrings in itertools.groupby(node.values, lambda v: isinstance(v, unicode_node)):
+ if is_unode_group:
+ substrings = list(substrings)
+ unode = substrings[0]
+ if len(substrings) > 1:
+ value = EncodedString(u''.join(value.value for value in substrings))
+ unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value)
+ # ignore empty Unicode strings
+ if unode.value:
+ values.append(unode)
+ else:
+ values.extend(substrings)
+
+ if not values:
+ value = EncodedString('')
+ node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value)
+ elif len(values) == 1:
+ node = values[0]
+ elif len(values) == 2:
+ # reduce to string concatenation
+ node = ExprNodes.binop_node(node.pos, '+', *values)
+ else:
+ node.values = values
+ return node
+
+ def visit_MergedDictNode(self, node):
+ """Unpack **args in place if we can."""
+ self.visitchildren(node)
+ args = []
+ items = []
+
+ def add(arg):
+ if arg.is_dict_literal:
+ if items:
+ items[0].key_value_pairs.extend(arg.key_value_pairs)
+ else:
+ items.append(arg)
+ elif isinstance(arg, ExprNodes.MergedDictNode):
+ for child_arg in arg.keyword_args:
+ add(child_arg)
+ else:
+ if items:
+ args.append(items[0])
+ del items[:]
+ args.append(arg)
+
+ for arg in node.keyword_args:
+ add(arg)
+ if items:
+ args.append(items[0])
+
+ if len(args) == 1:
+ arg = args[0]
+ if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode):
+ return arg
+ node.keyword_args[:] = args
+ self._calculate_const(node)
+ return node
+
+ def visit_MergedSequenceNode(self, node):
+ """Unpack *args in place if we can."""
+ self.visitchildren(node)
+
+ is_set = node.type is Builtin.set_type
+ args = []
+ values = []
+
+ def add(arg):
+ if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor):
+ if values:
+ values[0].args.extend(arg.args)
+ else:
+ values.append(arg)
+ elif isinstance(arg, ExprNodes.MergedSequenceNode):
+ for child_arg in arg.args:
+ add(child_arg)
+ else:
+ if values:
+ args.append(values[0])
+ del values[:]
+ args.append(arg)
+
+ for arg in node.args:
+ add(arg)
+ if values:
+ args.append(values[0])
+
+ if len(args) == 1:
+ arg = args[0]
+ if ((is_set and arg.is_set_literal) or
+ (arg.is_sequence_constructor and arg.type is node.type) or
+ isinstance(arg, ExprNodes.MergedSequenceNode)):
+ return arg
+ node.args[:] = args
+ self._calculate_const(node)
+ return node
+
+ def visit_SequenceNode(self, node):
+ """Unpack *args in place if we can."""
+ self.visitchildren(node)
+ args = []
+ for arg in node.args:
+ if not arg.is_starred:
+ args.append(arg)
+ elif arg.target.is_sequence_constructor and not arg.target.mult_factor:
+ args.extend(arg.target.args)
+ else:
+ args.append(arg)
+ node.args[:] = args
+ self._calculate_const(node)
+ return node
+
+ def visit_PrimaryCmpNode(self, node):
+ # calculate constant partial results in the comparison cascade
+ self.visitchildren(node, ['operand1'])
+ left_node = node.operand1
+ cmp_node = node
+ while cmp_node is not None:
+ self.visitchildren(cmp_node, ['operand2'])
+ right_node = cmp_node.operand2
+ cmp_node.constant_result = not_a_constant
+ if left_node.has_constant_result() and right_node.has_constant_result():
+ try:
+ cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
+ except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
+ pass # ignore all 'normal' errors here => no constant result
+ left_node = right_node
+ cmp_node = cmp_node.cascade
+
+ if not node.cascade:
+ if node.has_constant_result():
+ return self._bool_node(node, node.constant_result)
+ return node
+
+ # collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
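+        # Sketch: in "a < 1 < 2 < b", the constant step "1 < 2" folds to True,
+        # so the chain is split into "a < 1 and 2 < b"; a constant False step
+        # short-circuits the whole comparison to False.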
+ cascades = [[node.operand1]]
+ final_false_result = []
+
+ def split_cascades(cmp_node):
+ if cmp_node.has_constant_result():
+ if not cmp_node.constant_result:
+ # False => short-circuit
+ final_false_result.append(self._bool_node(cmp_node, False))
+ return
+ else:
+ # True => discard and start new cascade
+ cascades.append([cmp_node.operand2])
+ else:
+ # not constant => append to current cascade
+ cascades[-1].append(cmp_node)
+ if cmp_node.cascade:
+ split_cascades(cmp_node.cascade)
+
+ split_cascades(node)
+
+ cmp_nodes = []
+ for cascade in cascades:
+ if len(cascade) < 2:
+ continue
+ cmp_node = cascade[1]
+ pcmp_node = ExprNodes.PrimaryCmpNode(
+ cmp_node.pos,
+ operand1=cascade[0],
+ operator=cmp_node.operator,
+ operand2=cmp_node.operand2,
+ constant_result=not_a_constant)
+ cmp_nodes.append(pcmp_node)
+
+ last_cmp_node = pcmp_node
+ for cmp_node in cascade[2:]:
+ last_cmp_node.cascade = cmp_node
+ last_cmp_node = cmp_node
+ last_cmp_node.cascade = None
+
+ if final_false_result:
+ # last cascade was constant False
+ cmp_nodes.append(final_false_result[0])
+ elif not cmp_nodes:
+ # only constants, but no False result
+ return self._bool_node(node, True)
+ node = cmp_nodes[0]
+ if len(cmp_nodes) == 1:
+ if node.has_constant_result():
+ return self._bool_node(node, node.constant_result)
+ else:
+ for cmp_node in cmp_nodes[1:]:
+ node = ExprNodes.BoolBinopNode(
+ node.pos,
+ operand1=node,
+ operator='and',
+ operand2=cmp_node,
+ constant_result=not_a_constant)
+ return node
+
+ def visit_CondExprNode(self, node):
+ self._calculate_const(node)
+ if not node.test.has_constant_result():
+ return node
+ if node.test.constant_result:
+ return node.true_val
+ else:
+ return node.false_val
+
+ def visit_IfStatNode(self, node):
+ self.visitchildren(node)
+ # eliminate dead code based on constant condition results
+ if_clauses = []
+ for if_clause in node.if_clauses:
+ condition = if_clause.condition
+ if condition.has_constant_result():
+ if condition.constant_result:
+ # always true => subsequent clauses can safely be dropped
+ node.else_clause = if_clause.body
+ break
+ # else: false => drop clause
+ else:
+ # unknown result => normal runtime evaluation
+ if_clauses.append(if_clause)
+ if if_clauses:
+ node.if_clauses = if_clauses
+ return node
+ elif node.else_clause:
+ return node.else_clause
+ else:
+ return Nodes.StatListNode(node.pos, stats=[])
+
+ def visit_SliceIndexNode(self, node):
+ self._calculate_const(node)
+ # normalise start/stop values
+ if node.start is None or node.start.constant_result is None:
+ start = node.start = None
+ else:
+ start = node.start.constant_result
+ if node.stop is None or node.stop.constant_result is None:
+ stop = node.stop = None
+ else:
+ stop = node.stop.constant_result
+ # cut down sliced constant sequences
+ if node.constant_result is not not_a_constant:
+ base = node.base
+ if base.is_sequence_constructor and base.mult_factor is None:
+ base.args = base.args[start:stop]
+ return base
+ elif base.is_string_literal:
+ base = base.as_sliced_node(start, stop)
+ if base is not None:
+ return base
+ return node
+
+ def visit_ComprehensionNode(self, node):
+ self.visitchildren(node)
+ if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
+ # loop was pruned already => transform into literal
+ if node.type is Builtin.list_type:
+ return ExprNodes.ListNode(
+ node.pos, args=[], constant_result=[])
+ elif node.type is Builtin.set_type:
+ return ExprNodes.SetNode(
+ node.pos, args=[], constant_result=set())
+ elif node.type is Builtin.dict_type:
+ return ExprNodes.DictNode(
+ node.pos, key_value_pairs=[], constant_result={})
+ return node
+
+ def visit_ForInStatNode(self, node):
+ self.visitchildren(node)
+ sequence = node.iterator.sequence
+ if isinstance(sequence, ExprNodes.SequenceNode):
+ if not sequence.args:
+ if node.else_clause:
+ return node.else_clause
+ else:
+ # don't break list comprehensions
+ return Nodes.StatListNode(node.pos, stats=[])
+ # iterating over a list literal? => tuples are more efficient
+ if isinstance(sequence, ExprNodes.ListNode):
+ node.iterator.sequence = sequence.as_tuple()
+ return node
+
+ def visit_WhileStatNode(self, node):
+ self.visitchildren(node)
+ if node.condition and node.condition.has_constant_result():
+ if node.condition.constant_result:
+ node.condition = None
+ node.else_clause = None
+ else:
+ return node.else_clause
+ return node
+
+ def visit_ExprStatNode(self, node):
+ self.visitchildren(node)
+ if not isinstance(node.expr, ExprNodes.ExprNode):
+ # ParallelRangeTransform does this ...
+ return node
+ # drop unused constant expressions
+ if node.expr.has_constant_result():
+ return None
+ return node
+
+ # in the future, other nodes can have their own handler method here
+ # that can replace them with a constant result node
+
+ visit_Node = Visitor.VisitorTransform.recurse_to_children
+
+
+class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
+ """
+ This visitor handles several commuting optimizations, and is run
+ just before the C code generation phase.
+
+ The optimizations currently implemented in this class are:
+ - eliminate None assignment and refcounting for first assignment.
+ - isinstance -> typecheck for cdef types
+ - eliminate checks for None and/or types that became redundant after tree changes
+ - eliminate useless string formatting steps
+ - replace Python function calls that look like method calls by a faster PyMethodCallNode
+ """
+ in_loop = False
+
+ def visit_SingleAssignmentNode(self, node):
+ """Avoid redundant initialisation of local variables before their
+ first assignment.
+ """
+ self.visitchildren(node)
+ if node.first:
+ lhs = node.lhs
+ lhs.lhs_of_first_assignment = True
+ return node
+
+ def visit_SimpleCallNode(self, node):
+ """
+ Replace generic calls to isinstance(x, type) by a more efficient type check.
+ Replace likely Python method calls by a specialised PyMethodCallNode.
+ """
+ self.visitchildren(node)
+ function = node.function
+ if function.type.is_cfunction and function.is_name:
+ if function.name == 'isinstance' and len(node.args) == 2:
+ type_arg = node.args[1]
+ if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
+ cython_scope = self.context.cython_scope
+ function.entry = cython_scope.lookup('PyObject_TypeCheck')
+ function.type = function.entry.type
+ PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
+ node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
+ elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
+ "optimize.unpack_method_calls_in_pyinit"
+ if not self.in_loop and self.current_env().is_module_scope
+ else "optimize.unpack_method_calls")):
+            # optimise simple Python method calls
+ if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
+ node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and len(node.arg_tuple.args) > 1)):
+ # simple call, now exclude calls to objects that are definitely not methods
+ may_be_a_method = True
+ if function.type is Builtin.type_type:
+ may_be_a_method = False
+ elif function.is_attribute:
+ if function.entry and function.entry.type.is_cfunction:
+ # optimised builtin method
+ may_be_a_method = False
+ elif function.is_name:
+ entry = function.entry
+ if entry.is_builtin or entry.type.is_cfunction:
+ may_be_a_method = False
+ elif entry.cf_assignments:
+ # local functions/classes are definitely not methods
+ non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
+ may_be_a_method = any(
+ assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
+ for assignment in entry.cf_assignments)
+ if may_be_a_method:
+ if (node.self and function.is_attribute and
+ isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self):
+ # function self object was moved into a CloneNode => undo
+ function.obj = function.obj.arg
+ node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
+ node, function=function, arg_tuple=node.arg_tuple, type=node.type))
+ return node
+
+ def visit_NumPyMethodCallNode(self, node):
+ # Exclude from replacement above.
+ self.visitchildren(node)
+ return node
+
+ def visit_PyTypeTestNode(self, node):
+ """Remove tests for alternatively allowed None values from
+ type tests when we know that the argument cannot be None
+ anyway.
+ """
+ self.visitchildren(node)
+ if not node.notnone:
+ if not node.arg.may_be_none():
+ node.notnone = True
+ return node
+
+ def visit_NoneCheckNode(self, node):
+ """Remove None checks from expressions that definitely do not
+ carry a None value.
+ """
+ self.visitchildren(node)
+ if not node.arg.may_be_none():
+ return node.arg
+ return node
+
+ def visit_LoopNode(self, node):
+ """Remember when we enter a loop as some expensive optimisations might still be worth it there.
+ """
+ old_val = self.in_loop
+ self.in_loop = True
+ self.visitchildren(node)
+ self.in_loop = old_val
+ return node
+
+
+class ConsolidateOverflowCheck(Visitor.CythonTransform):
+ """
+ This class facilitates the sharing of overflow checking among all nodes
+ of a nested arithmetic expression. For example, given the expression
+    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
+ sequence will be evaluated and the overflow bit checked only at the end.
+ """
+ overflow_bit_node = None
+
+ def visit_Node(self, node):
+ if self.overflow_bit_node is not None:
+ saved = self.overflow_bit_node
+ self.overflow_bit_node = None
+ self.visitchildren(node)
+ self.overflow_bit_node = saved
+ else:
+ self.visitchildren(node)
+ return node
+
+ def visit_NumBinopNode(self, node):
+ if node.overflow_check and node.overflow_fold:
+ top_level_overflow = self.overflow_bit_node is None
+ if top_level_overflow:
+ self.overflow_bit_node = node
+ else:
+ node.overflow_bit_node = self.overflow_bit_node
+ node.overflow_check = False
+ self.visitchildren(node)
+ if top_level_overflow:
+ self.overflow_bit_node = None
+ else:
+ self.visitchildren(node)
+ return node
diff --git a/contrib/tools/cython/Cython/Compiler/Options.py b/contrib/tools/cython/Cython/Compiler/Options.py
new file mode 100644
index 0000000000..6c9103bb14
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Options.py
@@ -0,0 +1,555 @@
+#
+# Cython - Compilation-wide options and pragma declarations
+#
+
+from __future__ import absolute_import
+
+
+class ShouldBeFromDirective(object):
+
+ known_directives = []
+
+ def __init__(self, options_name, directive_name=None, disallow=False):
+ self.options_name = options_name
+ self.directive_name = directive_name or options_name
+ self.disallow = disallow
+ self.known_directives.append(self)
+
+ def __nonzero__(self):
+ self._bad_access()
+
+ def __int__(self):
+ self._bad_access()
+
+ def _bad_access(self):
+ raise RuntimeError(repr(self))
+
+ def __repr__(self):
+ return (
+ "Illegal access of '%s' from Options module rather than directive '%s'"
+ % (self.options_name, self.directive_name))
+
+
+"""
+The members of this module are documented using autodata in
+Cython/docs/src/reference/compilation.rst.
+See http://www.sphinx-doc.org/en/master/ext/autodoc.html#directive-autoattribute
+for how autodata works.
+Descriptions of those members should start with a #:
+Don't forget to keep the docs in sync by removing and adding
+the members in both this file and the .rst file.
+"""
+
+#: Whether or not to include docstring in the Python extension. If False, the binary size
+#: will be smaller, but the ``__doc__`` attribute of any class or function will be an
+#: empty string.
+docstrings = True
+
+#: Embed the source code position in the docstrings of functions and classes.
+embed_pos_in_docstring = False
+
+#: Copy the original source code line by line into C code comments
+#: in the generated code file to help with understanding the output.
+#: This is also required for coverage analysis.
+emit_code_comments = True
+
+# undocumented
+pre_import = None
+
+#: Decref global variables in each module on exit for garbage collection.
+#: 0: None, 1+: interned objects, 2+: cdef globals, 3+: type objects
+#: Mostly for reducing noise in Valgrind as it typically executes at process exit
+#: (when all memory will be reclaimed anyway).
+#: Note that directly or indirectly executed cleanup code that makes use of global
+#: variables or types may no longer be safe when enabling the respective level since
+#: there is no guaranteed order in which the (reference counted) objects will
+#: be cleaned up. The order can change due to live references and reference cycles.
+generate_cleanup_code = False
+
+#: Should tp_clear() set object fields to None instead of clearing them to NULL?
+clear_to_none = True
+
+#: Generate an annotated HTML version of the input source files for debugging and optimisation purposes.
+#: This has the same effect as the ``annotate`` argument in :func:`cythonize`.
+annotate = False
+
+# When annotating source files in HTML, include coverage information from
+# this file.
+annotate_coverage_xml = None
+
+#: This will abort the compilation on the first error occurred rather than trying
+#: to keep going and printing further error messages.
+fast_fail = False
+
+#: Turn all warnings into errors.
+warning_errors = False
+
+#: Make unknown names an error. Python raises a NameError when
+#: encountering unknown names at runtime, whereas this option makes
+#: them a compile time error. If you want full Python compatibility,
+#: you should disable this option and also 'cache_builtins'.
+error_on_unknown_names = True
+
+#: Make uninitialized local variable reference a compile time error.
+#: Python raises UnboundLocalError at runtime, whereas this option makes
+#: them a compile time error. Note that this option affects only variables
+#: of "python object" type.
+error_on_uninitialized = True
+
+#: This will convert statements of the form ``for i in range(...)``
+#: to ``for i from ...`` when ``i`` is a C integer type, and the direction
+#: (i.e. sign of step) can be determined.
+#: WARNING: This may change the semantics if the range causes assignment to
+#: i to overflow. Specifically, if this option is set, an error will be
+#: raised before the loop is entered, whereas without this option the loop
+#: will execute until an overflowing value is encountered.
+convert_range = True
+
+#: Perform lookups on builtin names only once, at module initialisation
+#: time. This will prevent the module from getting imported if a
+#: builtin name that it uses cannot be found during initialisation.
+#: Default is True.
+#: Note that some legacy builtins are automatically remapped
+#: from their Python 2 names to their Python 3 names by Cython
+#: when building in Python 3.x,
+#: so that they do not get in the way even if this option is enabled.
+cache_builtins = True
+
+#: Generate branch prediction hints to speed up error handling etc.
+gcc_branch_hints = True
+
+#: Enable this to allow one to write ``your_module.foo = ...`` to overwrite the
+#: definition of the cpdef function foo, at the cost of an extra dictionary
+#: lookup on every call.
+#: If this is False, it generates only the Python wrapper and no override check.
+lookup_module_cpdef = False
+
+#: Whether or not to embed the Python interpreter, for use in making a
+#: standalone executable or calling from external libraries.
+#: This will provide a C function which initialises the interpreter and
+#: executes the body of this module.
+#: See `this demo <https://github.com/cython/cython/tree/master/Demos/embed>`_
+#: for a concrete example.
+#: If true, the initialisation function is the C main() function, but
+#: this option can also be set to a non-empty string to provide a function name explicitly.
+#: Default is None (i.e. not embedded).
+embed = None
+
+# In previous iterations of Cython, globals() gave the first non-Cython module
+# globals in the call stack. Sage relies on this behavior for variable injection.
+old_style_globals = ShouldBeFromDirective('old_style_globals')
+
+#: Allows cimporting from a pyx file without a pxd file.
+cimport_from_pyx = False
+
+#: Maximum number of dimensions for buffers -- set lower than the number of
+#: dimensions supported by numpy, as slices are passed by value and involve
+#: a lot of copying.
+buffer_max_dims = 8
+
+#: Number of function closure instances to keep in a freelist (0: no freelists)
+closure_freelist_size = 8
+
+# Arcadia specific
+source_root = None
+
+
+def get_directive_defaults():
+ # To add an item to this list, all accesses should be changed to use the new
+ # directive, and the global option itself should be set to an instance of
+ # ShouldBeFromDirective.
+ for old_option in ShouldBeFromDirective.known_directives:
+ value = globals().get(old_option.options_name)
+ assert old_option.directive_name in _directive_defaults
+ if not isinstance(value, ShouldBeFromDirective):
+ if old_option.disallow:
+ raise RuntimeError(
+ "Option '%s' must be set from directive '%s'" % (
+ old_option.options_name, old_option.directive_name))
+ else:
+ # Warn?
+ _directive_defaults[old_option.directive_name] = value
+ return _directive_defaults
+
+# Declare compiler directives
+_directive_defaults = {
+ 'boundscheck' : True,
+ 'nonecheck' : False,
+ 'initializedcheck' : True,
+ 'embedsignature' : False,
+ 'auto_cpdef': False,
+ 'auto_pickle': None,
+ 'cdivision': False, # was True before 0.12
+ 'cdivision_warnings': False,
+ 'c_api_binop_methods': True,
+ 'cpow': True,
+ 'overflowcheck': False,
+ 'overflowcheck.fold': True,
+ 'always_allow_keywords': False,
+ 'allow_none_for_extension_args': True,
+ 'wraparound' : True,
+ 'ccomplex' : False, # use C99/C++ for complex types and arith
+ 'callspec' : "",
+ 'nogil' : False,
+ 'profile': False,
+ 'linetrace': False,
+ 'emit_code_comments': True, # copy original source code into C code comments
+ 'annotation_typing': True, # read type declarations from Python function annotations
+ 'infer_types': None,
+ 'infer_types.verbose': False,
+ 'autotestdict': True,
+ 'autotestdict.cdef': False,
+ 'autotestdict.all': False,
+ 'language_level': None,
+ 'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
+ 'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
+ 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079).
+ 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax.
+ 'c_string_type': 'bytes',
+ 'c_string_encoding': '',
+ 'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
+ 'unraisable_tracebacks': True,
+ 'old_style_globals': False,
+ 'np_pythran': False,
+ 'fast_gil': False,
+
+ # set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
+ 'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
+
+ 'warn': None,
+ 'warn.undeclared': False,
+ 'warn.unreachable': True,
+ 'warn.maybe_uninitialized': False,
+ 'warn.unused': False,
+ 'warn.unused_arg': False,
+ 'warn.unused_result': False,
+ 'warn.multiple_declarators': True,
+
+# optimizations
+ 'optimize.inline_defnode_calls': True,
+ 'optimize.unpack_method_calls': True, # increases code size when True
+ 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True
+ 'optimize.use_switch': True,
+
+# remove unreachable code
+ 'remove_unreachable': True,
+
+# control flow debug directives
+ 'control_flow.dot_output': "", # Graphviz output filename
+ 'control_flow.dot_annotate_defs': False, # Annotate definitions
+
+# test support
+ 'test_assert_path_exists' : [],
+ 'test_fail_if_path_exists' : [],
+
+# experimental, subject to change
+ 'binding': None,
+
+ 'formal_grammar': False,
+}
+
+# Extra warning directives
+extra_warnings = {
+ 'warn.maybe_uninitialized': True,
+ 'warn.unreachable': True,
+ 'warn.unused': True,
+}
+
+def one_of(*args):
+ def validate(name, value):
+ if value not in args:
+ raise ValueError("%s directive must be one of %s, got '%s'" % (
+ name, args, value))
+ else:
+ return value
+ return validate
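+
+# For illustration: one_of('bytes', 'str') returns a validator such that
+# validate('c_string_type', 'str') == 'str', while any other value raises
+# ValueError naming the allowed choices.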
+
+
+def normalise_encoding_name(option_name, encoding):
+ """
+ >>> normalise_encoding_name('c_string_encoding', 'ascii')
+ 'ascii'
+ >>> normalise_encoding_name('c_string_encoding', 'AsCIi')
+ 'ascii'
+ >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
+ 'ascii'
+ >>> normalise_encoding_name('c_string_encoding', 'utF8')
+ 'utf8'
+ >>> normalise_encoding_name('c_string_encoding', 'utF-8')
+ 'utf8'
+ >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
+ 'default'
+ >>> normalise_encoding_name('c_string_encoding', 'default')
+ 'default'
+ >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
+ 'SeriousLyNoSuch--Encoding'
+ """
+ if not encoding:
+ return ''
+ if encoding.lower() in ('default', 'ascii', 'utf8'):
+ return encoding.lower()
+ import codecs
+ try:
+ decoder = codecs.getdecoder(encoding)
+ except LookupError:
+ return encoding # may exist at runtime ...
+ for name in ('ascii', 'utf8'):
+ if codecs.getdecoder(name) == decoder:
+ return name
+ return encoding
+
+
+# Override the types inferred from the default values above, if needed
+directive_types = {
+ 'language_level': str, # values can be None/2/3/'3str', where None == 2+warning
+ 'auto_pickle': bool,
+ 'locals': dict,
+ 'final' : bool, # final cdef classes and methods
+ 'nogil' : bool,
+ 'internal' : bool, # cdef class visibility in the module dict
+ 'infer_types' : bool, # values can be True/None/False
+ 'binding' : bool,
+ 'cfunc' : None, # decorators do not take directive value
+ 'ccall' : None,
+ 'inline' : None,
+ 'staticmethod' : None,
+ 'cclass' : None,
+ 'no_gc_clear' : bool,
+ 'no_gc' : bool,
+ 'returns' : type,
+ 'exceptval': type, # actually (type, check=True/False), but has its own parser
+ 'set_initial_path': str,
+ 'freelist': int,
+ 'c_string_type': one_of('bytes', 'bytearray', 'str', 'unicode'),
+ 'c_string_encoding': normalise_encoding_name,
+ 'cpow': bool
+}
+
+for key, val in _directive_defaults.items():
+ if key not in directive_types:
+ directive_types[key] = type(val)
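+
+# e.g. 'boundscheck' (default True) is inferred as bool here, while entries
+# like 'c_string_type' keep the explicit validator declared above.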
+
+directive_scopes = { # defaults to available everywhere
+ # 'module', 'function', 'class', 'with statement'
+ 'auto_pickle': ('module', 'cclass'),
+ 'final' : ('cclass', 'function'),
+ 'nogil' : ('function', 'with statement'),
+ 'inline' : ('function',),
+ 'cfunc' : ('function', 'with statement'),
+ 'ccall' : ('function', 'with statement'),
+ 'returns' : ('function',),
+ 'exceptval' : ('function',),
+ 'locals' : ('function',),
+ 'staticmethod' : ('function',), # FIXME: analysis currently lacks more specific function scope
+ 'no_gc_clear' : ('cclass',),
+ 'no_gc' : ('cclass',),
+ 'internal' : ('cclass',),
+ 'cclass' : ('class', 'cclass', 'with statement'),
+ 'autotestdict' : ('module',),
+ 'autotestdict.all' : ('module',),
+ 'autotestdict.cdef' : ('module',),
+ 'set_initial_path' : ('module',),
+ 'test_assert_path_exists' : ('function', 'class', 'cclass'),
+ 'test_fail_if_path_exists' : ('function', 'class', 'cclass'),
+ 'freelist': ('cclass',),
+ 'emit_code_comments': ('module',),
+ 'annotation_typing': ('module',), # FIXME: analysis currently lacks more specific function scope
+ # Avoid scope-specific to/from_py_functions for c_string.
+ 'c_string_type': ('module',),
+ 'c_string_encoding': ('module',),
+ 'type_version_tag': ('module', 'cclass'),
+ 'language_level': ('module',),
+ # globals() could conceivably be controlled at a finer granularity,
+ # but that would complicate the implementation
+ 'old_style_globals': ('module',),
+ 'np_pythran': ('module',),
+ 'fast_gil': ('module',),
+ 'iterable_coroutine': ('module', 'function'),
+}
+
+
+def parse_directive_value(name, value, relaxed_bool=False):
+ """
+ Parses value as an option value for the given name and returns
+ the interpreted value. None is returned if the option does not exist.
+
+ >>> print(parse_directive_value('nonexisting', 'asdf asdfd'))
+ None
+ >>> parse_directive_value('boundscheck', 'True')
+ True
+ >>> parse_directive_value('boundscheck', 'true')
+ Traceback (most recent call last):
+ ...
+ ValueError: boundscheck directive must be set to True or False, got 'true'
+
+ >>> parse_directive_value('c_string_encoding', 'us-ascii')
+ 'ascii'
+ >>> parse_directive_value('c_string_type', 'str')
+ 'str'
+ >>> parse_directive_value('c_string_type', 'bytes')
+ 'bytes'
+ >>> parse_directive_value('c_string_type', 'bytearray')
+ 'bytearray'
+ >>> parse_directive_value('c_string_type', 'unicode')
+ 'unicode'
+ >>> parse_directive_value('c_string_type', 'unnicode')
+ Traceback (most recent call last):
+ ValueError: c_string_type directive must be one of ('bytes', 'bytearray', 'str', 'unicode'), got 'unnicode'
+ """
+ type = directive_types.get(name)
+ if not type:
+ return None
+ orig_value = value
+ if type is bool:
+ value = str(value)
+ if value == 'True':
+ return True
+ if value == 'False':
+ return False
+ if relaxed_bool:
+ value = value.lower()
+ if value in ("true", "yes"):
+ return True
+ elif value in ("false", "no"):
+ return False
+ raise ValueError("%s directive must be set to True or False, got '%s'" % (
+ name, orig_value))
+ elif type is int:
+ try:
+ return int(value)
+ except ValueError:
+ raise ValueError("%s directive must be set to an integer, got '%s'" % (
+ name, orig_value))
+ elif type is str:
+ return str(value)
+ elif callable(type):
+ return type(name, value)
+ else:
+ assert False
+
+
+def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False,
+ current_settings=None):
+ """
+ Parses a comma-separated list of pragma options. Whitespace
+ is ignored.
+
+ >>> parse_directive_list(' ')
+ {}
+ >>> (parse_directive_list('boundscheck=True') ==
+ ... {'boundscheck': True})
+ True
+ >>> parse_directive_list(' asdf')
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected "=" in option "asdf"
+ >>> parse_directive_list('boundscheck=hey')
+ Traceback (most recent call last):
+ ...
+ ValueError: boundscheck directive must be set to True or False, got 'hey'
+ >>> parse_directive_list('unknown=True')
+ Traceback (most recent call last):
+ ...
+ ValueError: Unknown option: "unknown"
+ >>> warnings = parse_directive_list('warn.all=True')
+ >>> len(warnings) > 1
+ True
+ >>> sum(warnings.values()) == len(warnings) # all true.
+ True
+ """
+ if current_settings is None:
+ result = {}
+ else:
+ result = current_settings
+ for item in s.split(','):
+ item = item.strip()
+ if not item:
+ continue
+ if '=' not in item:
+ raise ValueError('Expected "=" in option "%s"' % item)
+ name, value = [s.strip() for s in item.strip().split('=', 1)]
+ if name not in _directive_defaults:
+ found = False
+ if name.endswith('.all'):
+ prefix = name[:-3]
+ for directive in _directive_defaults:
+ if directive.startswith(prefix):
+ found = True
+ parsed_value = parse_directive_value(directive, value, relaxed_bool=relaxed_bool)
+ result[directive] = parsed_value
+ if not found and not ignore_unknown:
+ raise ValueError('Unknown option: "%s"' % name)
+ else:
+ parsed_value = parse_directive_value(name, value, relaxed_bool=relaxed_bool)
+ result[name] = parsed_value
+ return result
+
+
+def parse_variable_value(value):
+ """
+ Parses value as an option value and returns the interpreted value.
+
+ >>> parse_variable_value('True')
+ True
+ >>> parse_variable_value('true')
+ 'true'
+ >>> parse_variable_value('us-ascii')
+ 'us-ascii'
+ >>> parse_variable_value('str')
+ 'str'
+ >>> parse_variable_value('123')
+ 123
+ >>> parse_variable_value('1.23')
+ 1.23
+
+ """
+ if value == "True":
+ return True
+ elif value == "False":
+ return False
+ elif value == "None":
+ return None
+ elif value.isdigit():
+ return int(value)
+ else:
+ try:
+ value = float(value)
+ except Exception:
+ # Not a float
+ pass
+ return value
+
+
+def parse_compile_time_env(s, current_settings=None):
+ """
+ Parses a comma-separated list of pragma options. Whitespace
+ is ignored.
+
+ >>> parse_compile_time_env(' ')
+ {}
+ >>> (parse_compile_time_env('HAVE_OPENMP=True') ==
+ ... {'HAVE_OPENMP': True})
+ True
+ >>> parse_compile_time_env(' asdf')
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected "=" in option "asdf"
+ >>> parse_compile_time_env('NUM_THREADS=4') == {'NUM_THREADS': 4}
+ True
+ >>> parse_compile_time_env('unknown=anything') == {'unknown': 'anything'}
+ True
+ """
+ if current_settings is None:
+ result = {}
+ else:
+ result = current_settings
+ for item in s.split(','):
+ item = item.strip()
+ if not item:
+ continue
+ if '=' not in item:
+ raise ValueError('Expected "=" in option "%s"' % item)
+ name, value = [s.strip() for s in item.split('=', 1)]
+ result[name] = parse_variable_value(value)
+ return result
diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd
new file mode 100644
index 0000000000..2c17901fa4
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd
@@ -0,0 +1,82 @@
+
+from __future__ import absolute_import
+
+cimport cython
+
+from .Visitor cimport (
+ CythonTransform, VisitorTransform, TreeVisitor,
+ ScopeTrackingTransform, EnvTransform)
+
+cdef class SkipDeclarations: # (object):
+ pass
+
+cdef class NormalizeTree(CythonTransform):
+ cdef bint is_in_statlist
+ cdef bint is_in_expr
+ cpdef visit_StatNode(self, node, is_listcontainer=*)
+
+cdef class PostParse(ScopeTrackingTransform):
+ cdef dict specialattribute_handlers
+ cdef size_t lambda_counter
+ cdef size_t genexpr_counter
+ cdef _visit_assignment_node(self, node, list expr_list)
+
+
+#def eliminate_rhs_duplicates(list expr_list_list, list ref_node_sequence)
+#def sort_common_subsequences(list items)
+@cython.locals(starred_targets=Py_ssize_t, lhs_size=Py_ssize_t, rhs_size=Py_ssize_t)
+cdef flatten_parallel_assignments(list input, list output)
+cdef map_starred_assignment(list lhs_targets, list starred_assignments, list lhs_args, list rhs_args)
+
+#class PxdPostParse(CythonTransform, SkipDeclarations):
+#class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
+#class WithTransform(CythonTransform, SkipDeclarations):
+#class DecoratorTransform(CythonTransform, SkipDeclarations):
+
+#class AnalyseDeclarationsTransform(EnvTransform):
+
+cdef class AnalyseExpressionsTransform(CythonTransform):
+ pass
+
+cdef class ExpandInplaceOperators(EnvTransform):
+ pass
+
+cdef class AlignFunctionDefinitions(CythonTransform):
+ cdef dict directives
+ cdef set imported_names
+ cdef object scope
+
+@cython.final
+cdef class YieldNodeCollector(TreeVisitor):
+ cdef public list yields
+ cdef public list returns
+ cdef public list finallys
+ cdef public list excepts
+ cdef public bint has_return_value
+ cdef public bint has_yield
+ cdef public bint has_await
+
+@cython.final
+cdef class MarkClosureVisitor(CythonTransform):
+ cdef bint needs_closure
+
+@cython.final
+cdef class CreateClosureClasses(CythonTransform):
+ cdef list path
+ cdef bint in_lambda
+ cdef module_scope
+ cdef generator_class
+
+ cdef create_class_from_scope(self, node, target_module_scope, inner_node=*)
+ cdef find_entries_used_in_closures(self, node)
+
+#cdef class InjectGilHandling(VisitorTransform, SkipDeclarations):
+# cdef bint nogil
+
+cdef class GilCheck(VisitorTransform):
+ cdef list env_stack
+ cdef bint nogil
+ cdef bint nogil_declarator_only
+
+cdef class TransformBuiltinMethods(EnvTransform):
+ cdef visit_cython_attribute(self, node)
diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
new file mode 100644
index 0000000000..0e86d5b0e8
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
@@ -0,0 +1,3535 @@
+from __future__ import absolute_import
+
+import cython
+cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
+ Options=object, UtilNodes=object, LetNode=object,
+ LetRefNode=object, TreeFragment=object, EncodedString=object,
+ error=object, warning=object, copy=object, hashlib=object, sys=object,
+ _unicode=object)
+
+import copy
+import hashlib
+import sys
+
+from . import PyrexTypes
+from . import Naming
+from . import ExprNodes
+from . import Nodes
+from . import Options
+from . import Builtin
+from . import Errors
+
+from .Visitor import VisitorTransform, TreeVisitor
+from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
+from .UtilNodes import LetNode, LetRefNode
+from .TreeFragment import TreeFragment
+from .StringEncoding import EncodedString, _unicode
+from .Errors import error, warning, CompileError, InternalError
+from .Code import UtilityCode
+
+
+class SkipDeclarations(object):
+ """
+ Variable and function declarations can often have a deep tree structure,
+ and yet most transformations don't need to descend to this depth.
+
+ Declaration nodes are removed after AnalyseDeclarationsTransform, so there
+ is no need to use this for transformations after that point.
+ """
+ def visit_CTypeDefNode(self, node):
+ return node
+
+ def visit_CVarDefNode(self, node):
+ return node
+
+ def visit_CDeclaratorNode(self, node):
+ return node
+
+ def visit_CBaseTypeNode(self, node):
+ return node
+
+ def visit_CEnumDefNode(self, node):
+ return node
+
+ def visit_CStructOrUnionDefNode(self, node):
+ return node
+
+
+class NormalizeTree(CythonTransform):
+ """
+ This transform fixes up a few things after parsing
+ in order to make the parse tree more suitable for
+ transforms.
+
+ a) After parsing, blocks with only one statement will
+ be represented by that statement, not by a StatListNode.
+ When doing transforms this is annoying and inconsistent,
+ as one cannot in general remove a statement in a consistent
+ way and so on. This transform wraps any single statements
+ in a StatListNode containing a single statement.
+
+ b) The PassStatNode is a noop and serves no purpose beyond
+ plugging such one-statement blocks; i.e., once parsed a
+ "pass" can just as well be represented using an empty
+ StatListNode. This means fewer special cases to worry about
+ in subsequent transforms (one always checks to see if a
+ StatListNode has no children to see if the block is empty).
+ """
+
+ def __init__(self, context):
+ super(NormalizeTree, self).__init__(context)
+ self.is_in_statlist = False
+ self.is_in_expr = False
+
+ def visit_ExprNode(self, node):
+ stacktmp = self.is_in_expr
+ self.is_in_expr = True
+ self.visitchildren(node)
+ self.is_in_expr = stacktmp
+ return node
+
+ def visit_StatNode(self, node, is_listcontainer=False):
+ stacktmp = self.is_in_statlist
+ self.is_in_statlist = is_listcontainer
+ self.visitchildren(node)
+ self.is_in_statlist = stacktmp
+ if not self.is_in_statlist and not self.is_in_expr:
+ return Nodes.StatListNode(pos=node.pos, stats=[node])
+ else:
+ return node
+
+ def visit_StatListNode(self, node):
+ self.is_in_statlist = True
+ self.visitchildren(node)
+ self.is_in_statlist = False
+ return node
+
+ def visit_ParallelAssignmentNode(self, node):
+ return self.visit_StatNode(node, True)
+
+ def visit_CEnumDefNode(self, node):
+ return self.visit_StatNode(node, True)
+
+ def visit_CStructOrUnionDefNode(self, node):
+ return self.visit_StatNode(node, True)
+
+ def visit_PassStatNode(self, node):
+ """Eliminate PassStatNode"""
+ if not self.is_in_statlist:
+ return Nodes.StatListNode(pos=node.pos, stats=[])
+ else:
+ return []
+
+ def visit_ExprStatNode(self, node):
+ """Eliminate useless string literals"""
+ if node.expr.is_string_literal:
+ return self.visit_PassStatNode(node)
+ else:
+ return self.visit_StatNode(node)
+
+ def visit_CDeclaratorNode(self, node):
+ return node
+
+
+class PostParseError(CompileError): pass
+
+# error strings checked by unit tests, so define them
+ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
+ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
+ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
+class PostParse(ScopeTrackingTransform):
+ """
+ Basic interpretation of the parse tree, as well as validity
+ checking that can be done at a very basic level on the parse
+ tree (i.e. for issues that are not problems with the basic
+ syntax as such).
+
+ Specifically:
+ - Default values to cdef assignments are turned into single
+ assignments following the declaration (everywhere but in class
+ bodies, where they raise a compile error)
+
+ - Interpret some node structures into Python runtime values.
+ Some nodes take compile-time arguments (currently:
+ TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
+ which should be interpreted. This happens in a general way
+ and other steps should be taken to ensure validity.
+
+ Type arguments cannot be interpreted in this way.
+
+ - For __cythonbufferdefaults__ the arguments are checked for
+ validity.
+
+ TemplatedTypeNode has its directives interpreted:
+ Any first positional argument goes into the "dtype" attribute,
+ any "ndim" keyword argument goes into the "ndim" attribute and
+ so on. Also it is checked that the directive combination is valid.
+ - __cythonbufferdefaults__ attributes are parsed and put into the
+ type information.
+
+ Note: Currently Parsing.py does a lot of interpretation and
+ reorganization that can be refactored into this transform
+ if a more pure Abstract Syntax Tree is wanted.
+ """
+
+ def __init__(self, context):
+ super(PostParse, self).__init__(context)
+ self.specialattribute_handlers = {
+ '__cythonbufferdefaults__' : self.handle_bufferdefaults
+ }
+
+ def visit_LambdaNode(self, node):
+ # unpack a lambda expression into the corresponding DefNode
+ collector = YieldNodeCollector()
+ collector.visitchildren(node.result_expr)
+ if collector.has_yield or collector.has_await or isinstance(node.result_expr, ExprNodes.YieldExprNode):
+ body = Nodes.ExprStatNode(
+ node.result_expr.pos, expr=node.result_expr)
+ else:
+ body = Nodes.ReturnStatNode(
+ node.result_expr.pos, value=node.result_expr)
+ node.def_node = Nodes.DefNode(
+ node.pos, name=node.name,
+ args=node.args, star_arg=node.star_arg,
+ starstar_arg=node.starstar_arg,
+ body=body, doc=None)
+ self.visitchildren(node)
+ return node
+
+ def visit_GeneratorExpressionNode(self, node):
+ # unpack a generator expression into the corresponding DefNode
+ collector = YieldNodeCollector()
+ collector.visitchildren(node.loop)
+ node.def_node = Nodes.DefNode(
+ node.pos, name=node.name, doc=None,
+ args=[], star_arg=None, starstar_arg=None,
+ body=node.loop, is_async_def=collector.has_await)
+ self.visitchildren(node)
+ return node
+
+ def visit_ComprehensionNode(self, node):
+ # enforce local scope also in Py2 for async generators (seriously, that's a Py3.6 feature...)
+ if not node.has_local_scope:
+ collector = YieldNodeCollector()
+ collector.visitchildren(node.loop)
+ if collector.has_await:
+ node.has_local_scope = True
+ self.visitchildren(node)
+ return node
+
+ # cdef variables
+ def handle_bufferdefaults(self, decl):
+ if not isinstance(decl.default, ExprNodes.DictNode):
+ raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
+ self.scope_node.buffer_defaults_node = decl.default
+ self.scope_node.buffer_defaults_pos = decl.pos
+
+ def visit_CVarDefNode(self, node):
+ # This assumes only plain names and pointers are assignable on
+ # declaration. Also, it makes use of the fact that a cdef decl
+ # must appear before the first use, so we don't have to deal with
+ # "i = 3; cdef int i = i" and can simply move the nodes around.
+ try:
+ self.visitchildren(node)
+ stats = [node]
+ newdecls = []
+ for decl in node.declarators:
+ declbase = decl
+ while isinstance(declbase, Nodes.CPtrDeclaratorNode):
+ declbase = declbase.base
+ if isinstance(declbase, Nodes.CNameDeclaratorNode):
+ if declbase.default is not None:
+ if self.scope_type in ('cclass', 'pyclass', 'struct'):
+ if isinstance(self.scope_node, Nodes.CClassDefNode):
+ handler = self.specialattribute_handlers.get(decl.name)
+ if handler:
+ if decl is not declbase:
+ raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
+ handler(decl)
+ continue # Remove declaration
+ raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
+ first_assignment = self.scope_type != 'module'
+ stats.append(Nodes.SingleAssignmentNode(node.pos,
+ lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
+ rhs=declbase.default, first=first_assignment))
+ declbase.default = None
+ newdecls.append(decl)
+ node.declarators = newdecls
+ return stats
+ except PostParseError as e:
+ # An error in a cdef clause is ok, simply remove the declaration
+ # and try to move on to report more errors
+ self.context.nonfatal_error(e)
+ return None
+
+ # Split parallel assignments (a,b = b,a) into separate partial
+ # assignments that are executed rhs-first using temps. This
+ # restructuring must be applied before type analysis so that known
+ # types on rhs and lhs can be matched directly. It is required in
+ # the case that the types cannot be coerced to a Python type in
+ # order to assign from a tuple.
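+ #
+ # For example (illustration): "a, b = b, a" becomes a
+ # ParallelAssignmentNode wrapping "a = b" and "b = a", whose right-hand
+ # sides are all evaluated before any assignment takes place.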
+
+ def visit_SingleAssignmentNode(self, node):
+ self.visitchildren(node)
+ return self._visit_assignment_node(node, [node.lhs, node.rhs])
+
+ def visit_CascadedAssignmentNode(self, node):
+ self.visitchildren(node)
+ return self._visit_assignment_node(node, node.lhs_list + [node.rhs])
+
+ def _visit_assignment_node(self, node, expr_list):
+ """Flatten parallel assignments into separate single
+ assignments or cascaded assignments.
+ """
+ if sum([ 1 for expr in expr_list
+ if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
+ # no parallel assignments => nothing to do
+ return node
+
+ expr_list_list = []
+ flatten_parallel_assignments(expr_list, expr_list_list)
+ temp_refs = []
+ eliminate_rhs_duplicates(expr_list_list, temp_refs)
+
+ nodes = []
+ for expr_list in expr_list_list:
+ lhs_list = expr_list[:-1]
+ rhs = expr_list[-1]
+ if len(lhs_list) == 1:
+ node = Nodes.SingleAssignmentNode(rhs.pos,
+ lhs = lhs_list[0], rhs = rhs)
+ else:
+ node = Nodes.CascadedAssignmentNode(rhs.pos,
+ lhs_list = lhs_list, rhs = rhs)
+ nodes.append(node)
+
+ if len(nodes) == 1:
+ assign_node = nodes[0]
+ else:
+ assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)
+
+ if temp_refs:
+ duplicates_and_temps = [ (temp.expression, temp)
+ for temp in temp_refs ]
+ sort_common_subsequences(duplicates_and_temps)
+ for _, temp_ref in duplicates_and_temps[::-1]:
+ assign_node = LetNode(temp_ref, assign_node)
+
+ return assign_node
+
+ def _flatten_sequence(self, seq, result):
+ for arg in seq.args:
+ if arg.is_sequence_constructor:
+ self._flatten_sequence(arg, result)
+ else:
+ result.append(arg)
+ return result
+
+ def visit_DelStatNode(self, node):
+ self.visitchildren(node)
+ node.args = self._flatten_sequence(node, [])
+ return node
+
+ def visit_ExceptClauseNode(self, node):
+ if node.is_except_as:
+ # except-as must delete NameNode target at the end
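+ # i.e. (illustration) "except Exc as e: <body>" is rewritten to behave
+ # like:
+ #   except Exc as e:
+ #       try: <body>
+ #       finally: del e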
+ del_target = Nodes.DelStatNode(
+ node.pos,
+ args=[ExprNodes.NameNode(
+ node.target.pos, name=node.target.name)],
+ ignore_nonexisting=True)
+ node.body = Nodes.StatListNode(
+ node.pos,
+ stats=[Nodes.TryFinallyStatNode(
+ node.pos,
+ body=node.body,
+ finally_clause=Nodes.StatListNode(
+ node.pos,
+ stats=[del_target]))])
+ self.visitchildren(node)
+ return node
+
+
+def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
+ """Replace rhs items by LetRefNodes if they appear more than once.
+ Creates a sequence of LetRefNodes that set up the required temps
+ and appends them to ref_node_sequence. The input list is modified
+ in-place.
+ """
+ seen_nodes = set()
+ ref_nodes = {}
+ def find_duplicates(node):
+ if node.is_literal or node.is_name:
+ # no need to replace those; can't include attributes here
+ # as their access is not necessarily side-effect free
+ return
+ if node in seen_nodes:
+ if node not in ref_nodes:
+ ref_node = LetRefNode(node)
+ ref_nodes[node] = ref_node
+ ref_node_sequence.append(ref_node)
+ else:
+ seen_nodes.add(node)
+ if node.is_sequence_constructor:
+ for item in node.args:
+ find_duplicates(item)
+
+ for expr_list in expr_list_list:
+ rhs = expr_list[-1]
+ find_duplicates(rhs)
+ if not ref_nodes:
+ return
+
+ def substitute_nodes(node):
+ if node in ref_nodes:
+ return ref_nodes[node]
+ elif node.is_sequence_constructor:
+ node.args = list(map(substitute_nodes, node.args))
+ return node
+
+ # replace nodes inside of the common subexpressions
+ for node in ref_nodes:
+ if node.is_sequence_constructor:
+ node.args = list(map(substitute_nodes, node.args))
+
+ # replace common subexpressions on all rhs items
+ for expr_list in expr_list_list:
+ expr_list[-1] = substitute_nodes(expr_list[-1])
+
+def sort_common_subsequences(items):
+ """Sort items/subsequences so that all items and subsequences that
+ an item contains appear before the item itself. This is needed
+ because each rhs item must only be evaluated once, so its value
+ must be evaluated first and then reused when packing sequences
+ that contain it.
+
+ This implies a partial order, and the sort must be stable to
+ preserve the original order as much as possible, so we use a
+ simple insertion sort (which is very fast for short sequences, the
+ normal case in practice).
+ """
+ def contains(seq, x):
+ for item in seq:
+ if item is x:
+ return True
+ elif item.is_sequence_constructor and contains(item.args, x):
+ return True
+ return False
+ def lower_than(a,b):
+ return b.is_sequence_constructor and contains(b.args, a)
+
+ for pos, item in enumerate(items):
+ key = item[1] # the ResultRefNode which has already been injected into the sequences
+ new_pos = pos
+ for i in range(pos-1, -1, -1):
+ if lower_than(key, items[i][0]):
+ new_pos = i
+ if new_pos != pos:
+ for i in range(pos, new_pos, -1):
+ items[i] = items[i-1]
+ items[new_pos] = item
+
+def unpack_string_to_character_literals(literal):
+ chars = []
+ pos = literal.pos
+ stype = literal.__class__
+ sval = literal.value
+ sval_type = sval.__class__
+ for char in sval:
+ cval = sval_type(char)
+ chars.append(stype(pos, value=cval, constant_result=cval))
+ return chars
+
+def flatten_parallel_assignments(input, output):
+ # The input is a list of expression nodes, representing the LHSs
+ # and RHS of one (possibly cascaded) assignment statement. For
+ # sequence constructors, rearranges the matching parts of both
+ # sides into a list of equivalent assignments between the
+ # individual elements. This transformation is applied
+ # recursively, so that nested structures get matched as well.
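+ #
+ # For example (illustration):
+ #   input:  [ (a, b), (c, d) ]    # one assignment "a, b = c, d"
+ #   output: [ [a, c], [b, d] ]    # equivalent to "a = c" and "b = d"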
+ rhs = input[-1]
+ if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
+ or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
+ output.append(input)
+ return
+
+ complete_assignments = []
+
+ if rhs.is_sequence_constructor:
+ rhs_args = rhs.args
+ elif rhs.is_string_literal:
+ rhs_args = unpack_string_to_character_literals(rhs)
+
+ rhs_size = len(rhs_args)
+ lhs_targets = [[] for _ in range(rhs_size)]
+ starred_assignments = []
+ for lhs in input[:-1]:
+ if not lhs.is_sequence_constructor:
+ if lhs.is_starred:
+ error(lhs.pos, "starred assignment target must be in a list or tuple")
+ complete_assignments.append(lhs)
+ continue
+ lhs_size = len(lhs.args)
+ starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
+ if starred_targets > 1:
+ error(lhs.pos, "more than 1 starred expression in assignment")
+ output.append([lhs,rhs])
+ continue
+ elif lhs_size - starred_targets > rhs_size:
+ error(lhs.pos, "need more than %d value%s to unpack"
+ % (rhs_size, (rhs_size != 1) and 's' or ''))
+ output.append([lhs,rhs])
+ continue
+ elif starred_targets:
+ map_starred_assignment(lhs_targets, starred_assignments,
+ lhs.args, rhs_args)
+ elif lhs_size < rhs_size:
+ error(lhs.pos, "too many values to unpack (expected %d, got %d)"
+ % (lhs_size, rhs_size))
+ output.append([lhs,rhs])
+ continue
+ else:
+ for targets, expr in zip(lhs_targets, lhs.args):
+ targets.append(expr)
+
+ if complete_assignments:
+ complete_assignments.append(rhs)
+ output.append(complete_assignments)
+
+ # recursively flatten partial assignments
+ for cascade, rhs in zip(lhs_targets, rhs_args):
+ if cascade:
+ cascade.append(rhs)
+ flatten_parallel_assignments(cascade, output)
+
+ # recursively flatten starred assignments
+ for cascade in starred_assignments:
+ if cascade[0].is_sequence_constructor:
+ flatten_parallel_assignments(cascade, output)
+ else:
+ output.append(cascade)
+
+def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
+ # Appends the fixed-position LHS targets to the target list that
+ # appear left and right of the starred argument.
+ #
+ # The starred_assignments list receives a new tuple
+ # (lhs_target, rhs_values_list) that maps the remaining arguments
+ # (those that match the starred target) to a list.
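+ #
+ # For example (illustration): for "a, *b, c = w, x, y, z", the fixed
+ # targets a and c are mapped to w and z, and starred_assignments
+ # receives [b, ListNode([x, y])].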
+
+ # left side of the starred target
+ for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
+ if expr.is_starred:
+ starred = i
+ lhs_remaining = len(lhs_args) - i - 1
+ break
+ targets.append(expr)
+ else:
+ raise InternalError("no starred arg found when splitting starred assignment")
+
+ # right side of the starred target
+ for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
+ lhs_args[starred + 1:])):
+ targets.append(expr)
+
+ # the starred target itself, must be assigned a (potentially empty) list
+ target = lhs_args[starred].target # unpack starred node
+ starred_rhs = rhs_args[starred:]
+ if lhs_remaining:
+ starred_rhs = starred_rhs[:-lhs_remaining]
+ if starred_rhs:
+ pos = starred_rhs[0].pos
+ else:
+ pos = target.pos
+ starred_assignments.append([
+ target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
+
+
+class PxdPostParse(CythonTransform, SkipDeclarations):
+ """
+ Basic interpretation/validity checking that should only be
+ done on pxd trees.
+
+ A lot of this checking currently happens in the parser, but
+ the checks listed below happen here.
+
+ - "def" functions are let through only if they fill the
+ getbuffer/releasebuffer slots
+
+ - cdef functions are let through only if they are on the
+ top level and are declared "inline"
+ """
+ ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
+ ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"
+
+ def __call__(self, node):
+ self.scope_type = 'pxd'
+ return super(PxdPostParse, self).__call__(node)
+
+ def visit_CClassDefNode(self, node):
+ old = self.scope_type
+ self.scope_type = 'cclass'
+ self.visitchildren(node)
+ self.scope_type = old
+ return node
+
+ def visit_FuncDefNode(self, node):
+ # FuncDefNodes always come with an implementation (without
+ # an implementation they are CVarDefNodes...)
+ err = self.ERR_INLINE_ONLY
+
+ if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
+ and node.name in ('__getbuffer__', '__releasebuffer__')):
+ err = None # allow these slots
+
+ if isinstance(node, Nodes.CFuncDefNode):
+ if (u'inline' in node.modifiers and
+ self.scope_type in ('pxd', 'cclass')):
+ node.inline_in_pxd = True
+ if node.visibility != 'private':
+ err = self.ERR_NOGO_WITH_INLINE % node.visibility
+ elif node.api:
+ err = self.ERR_NOGO_WITH_INLINE % 'api'
+ else:
+ err = None # allow inline function
+ else:
+ err = self.ERR_INLINE_ONLY
+
+ if err:
+ self.context.nonfatal_error(PostParseError(node.pos, err))
+ return None
+ else:
+ return node
+
+
+class TrackNumpyAttributes(VisitorTransform, SkipDeclarations):
+ # TODO: Make name handling as good as in InterpretCompilerDirectives() below - probably best to merge the two.
+ def __init__(self):
+ super(TrackNumpyAttributes, self).__init__()
+ self.numpy_module_names = set()
+
+ def visit_CImportStatNode(self, node):
+ if node.module_name == u"numpy":
+ self.numpy_module_names.add(node.as_name or u"numpy")
+ return node
+
+ def visit_AttributeNode(self, node):
+ self.visitchildren(node)
+ obj = node.obj
+ if (obj.is_name and obj.name in self.numpy_module_names) or obj.is_numpy_attribute:
+ node.is_numpy_attribute = True
+ return node
+
+ visit_Node = VisitorTransform.recurse_to_children
+
+
+class InterpretCompilerDirectives(CythonTransform):
+ """
+ After parsing, directives can be stored in a number of places:
+ - #cython-comments at the top of the file (stored in ModuleNode)
+ - Command-line arguments overriding these
+ - @cython.directivename decorators
+ - with cython.directivename: statements
+
+ This transform is responsible for interpreting these various sources
+ and storing the directives in two ways:
+ - Set the directives attribute of the ModuleNode for global directives.
+ - Use a CompilerDirectivesNode to override directives for a subtree.
+
+ (The first one is primarily to avoid having to modify the tree
+ structure, so that the ModuleNode stays on top.)
+
+ The directives are stored in dictionaries from name to value in effect.
+ Each such dictionary is always filled in for all possible directives,
+ using default values where no value is given by the user.
+
+ The available directives are controlled in Options.py.
+
+ Note that we have to run this prior to analysis, and so some minor
+ duplication of functionality has to occur: We manually track cimports
+ and which names the "cython" module may have been imported to.
+ """
+ unop_method_nodes = {
+ 'typeof': ExprNodes.TypeofNode,
+
+ 'operator.address': ExprNodes.AmpersandNode,
+ 'operator.dereference': ExprNodes.DereferenceNode,
+ 'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
+ 'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
+ 'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
+ 'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
+ 'operator.typeid' : ExprNodes.TypeidNode,
+
+ # For backwards compatibility.
+ 'address': ExprNodes.AmpersandNode,
+ }
+
+ binop_method_nodes = {
+ 'operator.comma' : ExprNodes.c_binop_constructor(','),
+ }
+
+ special_methods = set(['declare', 'union', 'struct', 'typedef',
+ 'sizeof', 'cast', 'pointer', 'compiled',
+ 'NULL', 'fused_type', 'parallel'])
+ special_methods.update(unop_method_nodes)
+
+ valid_parallel_directives = set([
+ "parallel",
+ "prange",
+ "threadid",
+ #"threadsavailable",
+ ])
+
+ def __init__(self, context, compilation_directive_defaults):
+ super(InterpretCompilerDirectives, self).__init__(context)
+ self.cython_module_names = set()
+ self.directive_names = {'staticmethod': 'staticmethod'}
+ self.parallel_directives = {}
+ directives = copy.deepcopy(Options.get_directive_defaults())
+ for key, value in compilation_directive_defaults.items():
+ directives[_unicode(key)] = copy.deepcopy(value)
+ self.directives = directives
+
+ def check_directive_scope(self, pos, directive, scope):
+ legal_scopes = Options.directive_scopes.get(directive, None)
+ if legal_scopes and scope not in legal_scopes:
+ self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
+ 'is not allowed in %s scope' % (directive, scope)))
+ return False
+ else:
+ if directive not in Options.directive_types:
+ error(pos, "Invalid directive: '%s'." % (directive,))
+ return True
+
+ # Set up processing and handle the cython: comments.
+ def visit_ModuleNode(self, node):
+ for key in sorted(node.directive_comments):
+ if not self.check_directive_scope(node.pos, key, 'module'):
+ self.wrong_scope_error(node.pos, key, 'module')
+ del node.directive_comments[key]
+
+ self.module_scope = node.scope
+
+ self.directives.update(node.directive_comments)
+ node.directives = self.directives
+ node.parallel_directives = self.parallel_directives
+ self.visitchildren(node)
+ node.cython_module_names = self.cython_module_names
+ return node
+
+ # The following four functions track imports and cimports that
+ # begin with "cython"
+ def is_cython_directive(self, name):
+ return (name in Options.directive_types or
+ name in self.special_methods or
+ PyrexTypes.parse_basic_type(name))
+
+ def is_parallel_directive(self, full_name, pos):
+ """
+ Checks to see if full_name (e.g. cython.parallel.prange) is a valid
+ parallel directive. If it is a star import, it also updates the
+ parallel_directives.
+ """
+ result = (full_name + ".").startswith("cython.parallel.")
+
+ if result:
+ directive = full_name.split('.')
+ if full_name == u"cython.parallel":
+ self.parallel_directives[u"parallel"] = u"cython.parallel"
+ elif full_name == u"cython.parallel.*":
+ for name in self.valid_parallel_directives:
+ self.parallel_directives[name] = u"cython.parallel.%s" % name
+ elif (len(directive) != 3 or
+ directive[-1] not in self.valid_parallel_directives):
+ error(pos, "No such directive: %s" % full_name)
+
+ self.module_scope.use_utility_code(
+ UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
+
+ return result
+
+ def visit_CImportStatNode(self, node):
+ if node.module_name == u"cython":
+ self.cython_module_names.add(node.as_name or u"cython")
+ elif node.module_name.startswith(u"cython."):
+ if node.module_name.startswith(u"cython.parallel."):
+ error(node.pos, node.module_name + " is not a module")
+ if node.module_name == u"cython.parallel":
+ if node.as_name and node.as_name != u"cython":
+ self.parallel_directives[node.as_name] = node.module_name
+ else:
+ self.cython_module_names.add(u"cython")
+ self.parallel_directives[
+ u"cython.parallel"] = node.module_name
+ self.module_scope.use_utility_code(
+ UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
+ elif node.as_name:
+ self.directive_names[node.as_name] = node.module_name[7:]
+ else:
+ self.cython_module_names.add(u"cython")
+ # if this cimport was a compiler directive, we don't
+ # want to leave the cimport node sitting in the tree
+ return None
+ return node
+
+ def visit_FromCImportStatNode(self, node):
+ if not node.relative_level and (
+ node.module_name == u"cython" or node.module_name.startswith(u"cython.")):
+ submodule = (node.module_name + u".")[7:]
+ newimp = []
+
+ for pos, name, as_name, kind in node.imported_names:
+ full_name = submodule + name
+ qualified_name = u"cython." + full_name
+
+ if self.is_parallel_directive(qualified_name, node.pos):
+ # from cython cimport parallel, or
+ # from cython.parallel cimport parallel, prange, ...
+ self.parallel_directives[as_name or name] = qualified_name
+ elif self.is_cython_directive(full_name):
+ self.directive_names[as_name or name] = full_name
+ if kind is not None:
+ self.context.nonfatal_error(PostParseError(pos,
+ "Compiler directive imports must be plain imports"))
+ else:
+ newimp.append((pos, name, as_name, kind))
+
+ if not newimp:
+ return None
+
+ node.imported_names = newimp
+ return node
+
+ def visit_FromImportStatNode(self, node):
+ if (node.module.module_name.value == u"cython") or \
+ node.module.module_name.value.startswith(u"cython."):
+ submodule = (node.module.module_name.value + u".")[7:]
+ newimp = []
+ for name, name_node in node.items:
+ full_name = submodule + name
+ qualified_name = u"cython." + full_name
+ if self.is_parallel_directive(qualified_name, node.pos):
+ self.parallel_directives[name_node.name] = qualified_name
+ elif self.is_cython_directive(full_name):
+ self.directive_names[name_node.name] = full_name
+ else:
+ newimp.append((name, name_node))
+ if not newimp:
+ return None
+ node.items = newimp
+ return node
+
+ def visit_SingleAssignmentNode(self, node):
+ if isinstance(node.rhs, ExprNodes.ImportNode):
+ module_name = node.rhs.module_name.value
+ is_parallel = (module_name + u".").startswith(u"cython.parallel.")
+
+ if module_name != u"cython" and not is_parallel:
+ return node
+
+ module_name = node.rhs.module_name.value
+ as_name = node.lhs.name
+
+ node = Nodes.CImportStatNode(node.pos,
+ module_name = module_name,
+ as_name = as_name)
+ node = self.visit_CImportStatNode(node)
+ else:
+ self.visitchildren(node)
+
+ return node
+
+ def visit_NameNode(self, node):
+ if node.name in self.cython_module_names:
+ node.is_cython_module = True
+ else:
+ directive = self.directive_names.get(node.name)
+ if directive is not None:
+ node.cython_attribute = directive
+ return node
+
+ def visit_NewExprNode(self, node):
+ self.visit(node.cppclass)
+ self.visitchildren(node)
+ return node
+
+ def try_to_parse_directives(self, node):
+ # If node is the contents of a directive (in a with statement or
+ # decorator), returns a list of (directivename, value) pairs.
+ # Otherwise, returns None.
+ if isinstance(node, ExprNodes.CallNode):
+ self.visit(node.function)
+ optname = node.function.as_cython_attribute()
+ if optname:
+ directivetype = Options.directive_types.get(optname)
+ if directivetype:
+ args, kwds = node.explicit_args_kwds()
+ directives = []
+ key_value_pairs = []
+ if kwds is not None and directivetype is not dict:
+ for keyvalue in kwds.key_value_pairs:
+ key, value = keyvalue
+ sub_optname = "%s.%s" % (optname, key.value)
+ if Options.directive_types.get(sub_optname):
+ directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
+ else:
+ key_value_pairs.append(keyvalue)
+ if not key_value_pairs:
+ kwds = None
+ else:
+ kwds.key_value_pairs = key_value_pairs
+ if directives and not kwds and not args:
+ return directives
+ directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
+ return directives
+ elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
+ self.visit(node)
+ optname = node.as_cython_attribute()
+ if optname:
+ directivetype = Options.directive_types.get(optname)
+ if directivetype is bool:
+ arg = ExprNodes.BoolNode(node.pos, value=True)
+ return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
+ elif directivetype is None:
+ return [(optname, None)]
+ else:
+ raise PostParseError(
+ node.pos, "The '%s' directive should be used as a function call." % optname)
+ return None
+
+ def try_to_parse_directive(self, optname, args, kwds, pos):
+ if optname == 'np_pythran' and not self.context.cpp:
+ raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
+ elif optname == 'exceptval':
+ # default: exceptval(None, check=True)
+ arg_error = len(args) > 1
+ check = True
+ if kwds and kwds.key_value_pairs:
+ kw = kwds.key_value_pairs[0]
+ if (len(kwds.key_value_pairs) == 1 and
+ kw.key.is_string_literal and kw.key.value == 'check' and
+ isinstance(kw.value, ExprNodes.BoolNode)):
+ check = kw.value.value
+ else:
+ arg_error = True
+ if arg_error:
+ raise PostParseError(
+ pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
+ return ('exceptval', (args[0] if args else None, check))
+
+ directivetype = Options.directive_types.get(optname)
+ if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
+ return optname, Options.get_directive_defaults()[optname]
+ elif directivetype is bool:
+ if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
+ raise PostParseError(pos,
+ 'The %s directive takes one compile-time boolean argument' % optname)
+ return (optname, args[0].value)
+ elif directivetype is int:
+ if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
+ raise PostParseError(pos,
+ 'The %s directive takes one compile-time integer argument' % optname)
+ return (optname, int(args[0].value))
+ elif directivetype is str:
+ if kwds is not None or len(args) != 1 or not isinstance(
+ args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
+ raise PostParseError(pos,
+ 'The %s directive takes one compile-time string argument' % optname)
+ return (optname, str(args[0].value))
+ elif directivetype is type:
+ if kwds is not None or len(args) != 1:
+ raise PostParseError(pos,
+ 'The %s directive takes one type argument' % optname)
+ return (optname, args[0])
+ elif directivetype is dict:
+ if len(args) != 0:
+ raise PostParseError(pos,
+ 'The %s directive takes no positional arguments' % optname)
+ return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
+ elif directivetype is list:
+ if kwds and len(kwds.key_value_pairs) != 0:
+ raise PostParseError(pos,
+ 'The %s directive takes no keyword arguments' % optname)
+ return optname, [ str(arg.value) for arg in args ]
+ elif callable(directivetype):
+ if kwds is not None or len(args) != 1 or not isinstance(
+ args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
+ raise PostParseError(pos,
+ 'The %s directive takes one compile-time string argument' % optname)
+ return (optname, directivetype(optname, str(args[0].value)))
+ else:
+ assert False
+
+ def visit_with_directives(self, node, directives):
+ if not directives:
+ return self.visit_Node(node)
+
+ old_directives = self.directives
+ new_directives = dict(old_directives)
+ new_directives.update(directives)
+
+ if new_directives == old_directives:
+ return self.visit_Node(node)
+
+ self.directives = new_directives
+ retbody = self.visit_Node(node)
+ self.directives = old_directives
+
+ if not isinstance(retbody, Nodes.StatListNode):
+ retbody = Nodes.StatListNode(node.pos, stats=[retbody])
+ return Nodes.CompilerDirectivesNode(
+ pos=retbody.pos, body=retbody, directives=new_directives)
+
+ # Handle decorators
+ def visit_FuncDefNode(self, node):
+ directives = self._extract_directives(node, 'function')
+ return self.visit_with_directives(node, directives)
+
+ def visit_CVarDefNode(self, node):
+ directives = self._extract_directives(node, 'function')
+ for name, value in directives.items():
+ if name == 'locals':
+ node.directive_locals = value
+ elif name not in ('final', 'staticmethod'):
+ self.context.nonfatal_error(PostParseError(
+ node.pos,
+ "Cdef functions can only take cython.locals(), "
+ "staticmethod, or final decorators, got %s." % name))
+ return self.visit_with_directives(node, directives)
+
+ def visit_CClassDefNode(self, node):
+ directives = self._extract_directives(node, 'cclass')
+ return self.visit_with_directives(node, directives)
+
+ def visit_CppClassNode(self, node):
+ directives = self._extract_directives(node, 'cppclass')
+ return self.visit_with_directives(node, directives)
+
+ def visit_PyClassDefNode(self, node):
+ directives = self._extract_directives(node, 'class')
+ return self.visit_with_directives(node, directives)
+
+ def _extract_directives(self, node, scope_name):
+ if not node.decorators:
+ return {}
+ # Split the decorators into two lists -- real decorators and directives
+ directives = []
+ realdecs = []
+ both = []
+ # Decorators coming first take precedence.
+ for dec in node.decorators[::-1]:
+ new_directives = self.try_to_parse_directives(dec.decorator)
+ if new_directives is not None:
+ for directive in new_directives:
+ if self.check_directive_scope(node.pos, directive[0], scope_name):
+ name, value = directive
+ if self.directives.get(name, object()) != value:
+ directives.append(directive)
+ if directive[0] == 'staticmethod':
+ both.append(dec)
+ # Adapt scope type based on decorators that change it.
+ if directive[0] == 'cclass' and scope_name == 'class':
+ scope_name = 'cclass'
+ else:
+ realdecs.append(dec)
+ if realdecs and (scope_name == 'cclass' or
+ isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
+ raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
+ node.decorators = realdecs[::-1] + both[::-1]
+ # merge or override repeated directives
+ optdict = {}
+ for directive in directives:
+ name, value = directive
+ if name in optdict:
+ old_value = optdict[name]
+ # keywords and arg lists can be merged, everything
+ # else overrides completely
+ if isinstance(old_value, dict):
+ old_value.update(value)
+ elif isinstance(old_value, list):
+ old_value.extend(value)
+ else:
+ optdict[name] = value
+ else:
+ optdict[name] = value
+ return optdict
+
+ # Handle with-statements
+ def visit_WithStatNode(self, node):
+ directive_dict = {}
+ for directive in self.try_to_parse_directives(node.manager) or []:
+ if directive is not None:
+ if node.target is not None:
+ self.context.nonfatal_error(
+ PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
+ else:
+ name, value = directive
+ if name in ('nogil', 'gil'):
+ # special case: in pure mode, "with nogil" spells "with cython.nogil"
+ node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
+ return self.visit_Node(node)
+ if self.check_directive_scope(node.pos, name, 'with statement'):
+ directive_dict[name] = value
+ if directive_dict:
+ return self.visit_with_directives(node.body, directive_dict)
+ return self.visit_Node(node)
+
+
+class ParallelRangeTransform(CythonTransform, SkipDeclarations):
+ """
+ Transform cython.parallel stuff. The parallel_directives come from the
+ module node, set there by InterpretCompilerDirectives.
+
+ x = cython.parallel.threadsavailable() -> ParallelThreadsAvailableNode
+ with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
+ print cython.parallel.threadid() -> ParallelThreadIdNode
+ for i in cython.parallel.prange(...): -> ParallelRangeNode
+ ...
+ """
+
+ # a list of names, maps 'cython.parallel.prange' in the code to
+ # ['cython', 'parallel', 'prange']
+ parallel_directive = None
+
+ # Indicates whether a namenode in an expression is the cython module
+ namenode_is_cython_module = False
+
+ # Keep track of whether we are the context manager of a 'with' statement
+ in_context_manager_section = False
+
+ # One of 'prange' or 'with parallel'. This is used to disallow closely
+ # nested 'with parallel:' blocks
+ state = None
+
+ directive_to_node = {
+ u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
+ # u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
+ u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
+ u"cython.parallel.prange": Nodes.ParallelRangeNode,
+ }
+
+ def node_is_parallel_directive(self, node):
+ return node.name in self.parallel_directives or node.is_cython_module
+
+ def get_directive_class_node(self, node):
+ """
+ Figure out which parallel directive was used and return the associated
+ Node class.
+
+ E.g. for a cython.parallel.prange() call we return ParallelRangeNode
+ """
+ if self.namenode_is_cython_module:
+ directive = '.'.join(self.parallel_directive)
+ else:
+ directive = self.parallel_directives[self.parallel_directive[0]]
+ directive = '%s.%s' % (directive,
+ '.'.join(self.parallel_directive[1:]))
+ directive = directive.rstrip('.')
+
+ cls = self.directive_to_node.get(directive)
+ if cls is None and not (self.namenode_is_cython_module and
+ self.parallel_directive[0] != 'parallel'):
+ error(node.pos, "Invalid directive: %s" % directive)
+
+ self.namenode_is_cython_module = False
+ self.parallel_directive = None
+
+ return cls
+
+ def visit_ModuleNode(self, node):
+ """
+ If any parallel directives were imported, copy them over and visit
+ the AST
+ """
+ if node.parallel_directives:
+ self.parallel_directives = node.parallel_directives
+ return self.visit_Node(node)
+
+ # No parallel directives were imported, so they can't be used :)
+ return node
+
+ def visit_NameNode(self, node):
+ if self.node_is_parallel_directive(node):
+ self.parallel_directive = [node.name]
+ self.namenode_is_cython_module = node.is_cython_module
+ return node
+
+ def visit_AttributeNode(self, node):
+ self.visitchildren(node)
+ if self.parallel_directive:
+ self.parallel_directive.append(node.attribute)
+ return node
+
+ def visit_CallNode(self, node):
+ self.visit(node.function)
+ if not self.parallel_directive:
+ self.visitchildren(node, exclude=('function',))
+ return node
+
+ # We are a parallel directive, replace this node with the
+ # corresponding ParallelSomethingSomething node
+
+ if isinstance(node, ExprNodes.GeneralCallNode):
+ args = node.positional_args.args
+ kwargs = node.keyword_args
+ else:
+ args = node.args
+ kwargs = {}
+
+ parallel_directive_class = self.get_directive_class_node(node)
+ if parallel_directive_class:
+ # Note: in case of a parallel() the body is set by
+ # visit_WithStatNode
+ node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
+
+ return node
+
+ def visit_WithStatNode(self, node):
+ "Rewrite with cython.parallel.parallel() blocks"
+ newnode = self.visit(node.manager)
+
+ if isinstance(newnode, Nodes.ParallelWithBlockNode):
+ if self.state == 'parallel with':
+ error(node.manager.pos,
+ "Nested parallel with blocks are disallowed")
+
+ self.state = 'parallel with'
+ body = self.visit(node.body)
+ self.state = None
+
+ newnode.body = body
+ return newnode
+ elif self.parallel_directive:
+ parallel_directive_class = self.get_directive_class_node(node)
+
+ if not parallel_directive_class:
+ # There was an error, stop here and now
+ return None
+
+ if parallel_directive_class is Nodes.ParallelWithBlockNode:
+ error(node.pos, "The parallel directive must be called")
+ return None
+
+ node.body = self.visit(node.body)
+ return node
+
+ def visit_ForInStatNode(self, node):
+ "Rewrite 'for i in cython.parallel.prange(...):'"
+ self.visit(node.iterator)
+ self.visit(node.target)
+
+ in_prange = isinstance(node.iterator.sequence,
+ Nodes.ParallelRangeNode)
+ previous_state = self.state
+
+ if in_prange:
+ # This will replace the entire ForInStatNode, so copy the
+ # attributes
+ parallel_range_node = node.iterator.sequence
+
+ parallel_range_node.target = node.target
+ parallel_range_node.body = node.body
+ parallel_range_node.else_clause = node.else_clause
+
+ node = parallel_range_node
+
+ if not isinstance(node.target, ExprNodes.NameNode):
+ error(node.target.pos,
+ "Can only iterate over an iteration variable")
+
+ self.state = 'prange'
+
+ self.visit(node.body)
+ self.state = previous_state
+ self.visit(node.else_clause)
+ return node
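+
+    # Sketch of the rewrite above: for a loop such as
+    #     for i in prange(n, nogil=True): f(i)
+    # the ForInStatNode is replaced by the ParallelRangeNode parsed from
+    # the prange(...) call, with the loop target, body and else clause
+    # copied onto it.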
+
+ def visit(self, node):
+ "Visit a node that may be None"
+ if node is not None:
+ return super(ParallelRangeTransform, self).visit(node)
+
+
+class WithTransform(CythonTransform, SkipDeclarations):
+ def visit_WithStatNode(self, node):
+ self.visitchildren(node, 'body')
+ pos = node.pos
+ is_async = node.is_async
+ body, target, manager = node.body, node.target, node.manager
+ node.enter_call = ExprNodes.SimpleCallNode(
+ pos, function=ExprNodes.AttributeNode(
+ pos, obj=ExprNodes.CloneNode(manager),
+ attribute=EncodedString('__aenter__' if is_async else '__enter__'),
+ is_special_lookup=True),
+ args=[],
+ is_temp=True)
+
+ if is_async:
+ node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
+
+ if target is not None:
+ body = Nodes.StatListNode(
+ pos, stats=[
+ Nodes.WithTargetAssignmentStatNode(
+ pos, lhs=target, with_node=node),
+ body])
+
+ excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
+ ExprNodes.ExcValueNode(pos) for _ in range(3)])
+ except_clause = Nodes.ExceptClauseNode(
+ pos, body=Nodes.IfStatNode(
+ pos, if_clauses=[
+ Nodes.IfClauseNode(
+ pos, condition=ExprNodes.NotNode(
+ pos, operand=ExprNodes.WithExitCallNode(
+ pos, with_stat=node,
+ test_if_run=False,
+ args=excinfo_target,
+ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+ body=Nodes.ReraiseStatNode(pos),
+ ),
+ ],
+ else_clause=None),
+ pattern=None,
+ target=None,
+ excinfo_target=excinfo_target,
+ )
+
+ node.body = Nodes.TryFinallyStatNode(
+ pos, body=Nodes.TryExceptStatNode(
+ pos, body=body,
+ except_clauses=[except_clause],
+ else_clause=None,
+ ),
+ finally_clause=Nodes.ExprStatNode(
+ pos, expr=ExprNodes.WithExitCallNode(
+ pos, with_stat=node,
+ test_if_run=True,
+ args=ExprNodes.TupleNode(
+ pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
+ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+ handle_error_case=False,
+ )
+ return node
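+
+    # Roughly, the tree built above corresponds to this sketch:
+    #     mgr = MANAGER
+    #     value = mgr.__enter__()          # __aenter__ + await if async
+    #     try:
+    #         TARGET = value               # only if a target was given
+    #         BODY
+    #     except:
+    #         if not mgr.__exit__(*exc_info()):
+    #             raise
+    #     finally:
+    #         mgr.__exit__(None, None, None)   # only if not already run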
+
+ def visit_ExprNode(self, node):
+ # With statements are never inside expressions.
+ return node
+
+
+class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
+ """
+ Transforms method decorators in cdef classes into nested calls or properties.
+
+ Python-style decorator properties are transformed into a PropertyNode
+    with up to three getter, setter and deleter DefNodes.
+ The functional style isn't supported yet.
+ """
+ _properties = None
+
+ _map_property_attribute = {
+ 'getter': '__get__',
+ 'setter': '__set__',
+ 'deleter': '__del__',
+ }.get
+
+ def visit_CClassDefNode(self, node):
+ if self._properties is None:
+ self._properties = []
+ self._properties.append({})
+ super(DecoratorTransform, self).visit_CClassDefNode(node)
+ self._properties.pop()
+ return node
+
+ def visit_PropertyNode(self, node):
+ # Low-level warning for other code until we can convert all our uses over.
+ level = 2 if isinstance(node.pos[0], str) else 0
+ warning(node.pos, "'property %s:' syntax is deprecated, use '@property'" % node.name, level)
+ return node
+
+ def visit_DefNode(self, node):
+ scope_type = self.scope_type
+ node = self.visit_FuncDefNode(node)
+ if scope_type != 'cclass' or not node.decorators:
+ return node
+
+ # transform @property decorators
+ properties = self._properties[-1]
+ for decorator_node in node.decorators[::-1]:
+ decorator = decorator_node.decorator
+ if decorator.is_name and decorator.name == 'property':
+ if len(node.decorators) > 1:
+ return self._reject_decorated_property(node, decorator_node)
+ name = node.name
+ node.name = EncodedString('__get__')
+ node.decorators.remove(decorator_node)
+ stat_list = [node]
+ if name in properties:
+ prop = properties[name]
+ prop.pos = node.pos
+ prop.doc = node.doc
+ prop.body.stats = stat_list
+ return []
+ prop = Nodes.PropertyNode(node.pos, name=name)
+ prop.doc = node.doc
+ prop.body = Nodes.StatListNode(node.pos, stats=stat_list)
+ properties[name] = prop
+ return [prop]
+ elif decorator.is_attribute and decorator.obj.name in properties:
+ handler_name = self._map_property_attribute(decorator.attribute)
+ if handler_name:
+ if decorator.obj.name != node.name:
+                        # CPython does not raise an error or warning here, but it does not produce anything useful either.
+ error(decorator_node.pos,
+ "Mismatching property names, expected '%s', got '%s'" % (
+ decorator.obj.name, node.name))
+ elif len(node.decorators) > 1:
+ return self._reject_decorated_property(node, decorator_node)
+ else:
+ return self._add_to_property(properties, node, handler_name, decorator_node)
+
+ # we clear node.decorators, so we need to set the
+ # is_staticmethod/is_classmethod attributes now
+ for decorator in node.decorators:
+ func = decorator.decorator
+ if func.is_name:
+ node.is_classmethod |= func.name == 'classmethod'
+ node.is_staticmethod |= func.name == 'staticmethod'
+
+ # transform normal decorators
+ decs = node.decorators
+ node.decorators = None
+ return self.chain_decorators(node, decs, node.name)
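+
+    # Example of the property handling above: in a cdef class,
+    #     @property
+    #     def foo(self): ...
+    #     @foo.setter
+    #     def foo(self, value): ...
+    # collapses into a single PropertyNode whose body holds a __get__
+    # and a __set__ DefNode.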
+
+ @staticmethod
+ def _reject_decorated_property(node, decorator_node):
+ # restrict transformation to outermost decorator as wrapped properties will probably not work
+ for deco in node.decorators:
+ if deco != decorator_node:
+ error(deco.pos, "Property methods with additional decorators are not supported")
+ return node
+
+ @staticmethod
+ def _add_to_property(properties, node, name, decorator):
+ prop = properties[node.name]
+ node.name = name
+ node.decorators.remove(decorator)
+ stats = prop.body.stats
+ for i, stat in enumerate(stats):
+ if stat.name == name:
+ stats[i] = node
+ break
+ else:
+ stats.append(node)
+ return []
+
+ @staticmethod
+ def chain_decorators(node, decorators, name):
+ """
+ Decorators are applied directly in DefNode and PyClassDefNode to avoid
+ reassignments to the function/class name - except for cdef class methods.
+ For those, the reassignment is required as methods are originally
+ defined in the PyMethodDef struct.
+
+ The IndirectionNode allows DefNode to override the decorator.
+ """
+ decorator_result = ExprNodes.NameNode(node.pos, name=name)
+ for decorator in decorators[::-1]:
+ decorator_result = ExprNodes.SimpleCallNode(
+ decorator.pos,
+ function=decorator.decorator,
+ args=[decorator_result])
+
+ name_node = ExprNodes.NameNode(node.pos, name=name)
+ reassignment = Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=name_node,
+ rhs=decorator_result)
+
+ reassignment = Nodes.IndirectionNode([reassignment])
+ node.decorator_indirection = reassignment
+ return [node, reassignment]
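+
+    # Example: for
+    #     @outer
+    #     @inner
+    #     def f(self): ...
+    # chain_decorators() emits the reassignment "f = outer(inner(f))"
+    # after the DefNode, wrapped in an IndirectionNode so that DefNode
+    # can still override it.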
+
+
+class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
+ """
+ Only part of the CythonUtilityCode pipeline. Must be run before
+ DecoratorTransform in case this is a decorator for a cdef class.
+ It filters out @cname('my_cname') decorators and rewrites them to
+ CnameDecoratorNodes.
+ """
+
+ def handle_function(self, node):
+ if not getattr(node, 'decorators', None):
+ return self.visit_Node(node)
+
+ for i, decorator in enumerate(node.decorators):
+ decorator = decorator.decorator
+
+ if (isinstance(decorator, ExprNodes.CallNode) and
+ decorator.function.is_name and
+ decorator.function.name == 'cname'):
+ args, kwargs = decorator.explicit_args_kwds()
+
+ if kwargs:
+ raise AssertionError(
+ "cname decorator does not take keyword arguments")
+
+ if len(args) != 1:
+ raise AssertionError(
+ "cname decorator takes exactly one argument")
+
+ if not (args[0].is_literal and
+ args[0].type == Builtin.str_type):
+ raise AssertionError(
+ "argument to cname decorator must be a string literal")
+
+ cname = args[0].compile_time_value(None)
+ del node.decorators[i]
+ node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
+ cname=cname)
+ break
+
+ return self.visit_Node(node)
+
+ visit_FuncDefNode = handle_function
+ visit_CClassDefNode = handle_function
+ visit_CEnumDefNode = handle_function
+ visit_CStructOrUnionDefNode = handle_function
+
+
+class ForwardDeclareTypes(CythonTransform):
+
+ def visit_CompilerDirectivesNode(self, node):
+ env = self.module_scope
+ old = env.directives
+ env.directives = node.directives
+ self.visitchildren(node)
+ env.directives = old
+ return node
+
+ def visit_ModuleNode(self, node):
+ self.module_scope = node.scope
+ self.module_scope.directives = node.directives
+ self.visitchildren(node)
+ return node
+
+ def visit_CDefExternNode(self, node):
+ old_cinclude_flag = self.module_scope.in_cinclude
+ self.module_scope.in_cinclude = 1
+ self.visitchildren(node)
+ self.module_scope.in_cinclude = old_cinclude_flag
+ return node
+
+ def visit_CEnumDefNode(self, node):
+ node.declare(self.module_scope)
+ return node
+
+ def visit_CStructOrUnionDefNode(self, node):
+ if node.name not in self.module_scope.entries:
+ node.declare(self.module_scope)
+ return node
+
+ def visit_CClassDefNode(self, node):
+ if node.class_name not in self.module_scope.entries:
+ node.declare(self.module_scope)
+ # Expand fused methods of .pxd declared types to construct the final vtable order.
+ type = self.module_scope.entries[node.class_name].type
+ if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope:
+ scope = type.scope
+ for entry in scope.cfunc_entries:
+ if entry.type and entry.type.is_fused:
+ entry.type.get_all_specialized_function_types()
+ return node
+
+
+class AnalyseDeclarationsTransform(EnvTransform):
+
+ basic_property = TreeFragment(u"""
+property NAME:
+ def __get__(self):
+ return ATTR
+ def __set__(self, value):
+ ATTR = value
+ """, level='c_class', pipeline=[NormalizeTree(None)])
+ basic_pyobject_property = TreeFragment(u"""
+property NAME:
+ def __get__(self):
+ return ATTR
+ def __set__(self, value):
+ ATTR = value
+ def __del__(self):
+ ATTR = None
+ """, level='c_class', pipeline=[NormalizeTree(None)])
+ basic_property_ro = TreeFragment(u"""
+property NAME:
+ def __get__(self):
+ return ATTR
+ """, level='c_class', pipeline=[NormalizeTree(None)])
+
+ struct_or_union_wrapper = TreeFragment(u"""
+cdef class NAME:
+ cdef TYPE value
+ def __init__(self, MEMBER=None):
+ cdef int count
+ count = 0
+ INIT_ASSIGNMENTS
+ if IS_UNION and count > 1:
+ raise ValueError, "At most one union member should be specified."
+ def __str__(self):
+ return STR_FORMAT % MEMBER_TUPLE
+ def __repr__(self):
+ return REPR_FORMAT % MEMBER_TUPLE
+ """, pipeline=[NormalizeTree(None)])
+
+ init_assignment = TreeFragment(u"""
+if VALUE is not None:
+ ATTR = VALUE
+ count += 1
+ """, pipeline=[NormalizeTree(None)])
+
+ fused_function = None
+ in_lambda = 0
+
+ def __call__(self, root):
+ # needed to determine if a cdef var is declared after it's used.
+ self.seen_vars_stack = []
+ self.fused_error_funcs = set()
+ super_class = super(AnalyseDeclarationsTransform, self)
+ self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
+ return super_class.__call__(root)
+
+ def visit_NameNode(self, node):
+ self.seen_vars_stack[-1].add(node.name)
+ return node
+
+ def visit_ModuleNode(self, node):
+ # Pickling support requires injecting module-level nodes.
+ self.extra_module_declarations = []
+ self.seen_vars_stack.append(set())
+ node.analyse_declarations(self.current_env())
+ self.visitchildren(node)
+ self.seen_vars_stack.pop()
+ node.body.stats.extend(self.extra_module_declarations)
+ return node
+
+ def visit_LambdaNode(self, node):
+ self.in_lambda += 1
+ node.analyse_declarations(self.current_env())
+ self.visitchildren(node)
+ self.in_lambda -= 1
+ return node
+
+ def visit_CClassDefNode(self, node):
+ node = self.visit_ClassDefNode(node)
+ if node.scope and node.scope.implemented and node.body:
+ stats = []
+ for entry in node.scope.var_entries:
+ if entry.needs_property:
+ property = self.create_Property(entry)
+ property.analyse_declarations(node.scope)
+ self.visit(property)
+ stats.append(property)
+ if stats:
+ node.body.stats += stats
+ if (node.visibility != 'extern'
+ and not node.scope.lookup('__reduce__')
+ and not node.scope.lookup('__reduce_ex__')):
+ self._inject_pickle_methods(node)
+ return node
+
+ def _inject_pickle_methods(self, node):
+ env = self.current_env()
+ if node.scope.directives['auto_pickle'] is False: # None means attempt it.
+ # Old behavior of not doing anything.
+ return
+ auto_pickle_forced = node.scope.directives['auto_pickle'] is True
+
+ all_members = []
+ cls = node.entry.type
+ cinit = None
+ inherited_reduce = None
+ while cls is not None:
+ all_members.extend(e for e in cls.scope.var_entries if e.name not in ('__weakref__', '__dict__'))
+ cinit = cinit or cls.scope.lookup('__cinit__')
+ inherited_reduce = inherited_reduce or cls.scope.lookup('__reduce__') or cls.scope.lookup('__reduce_ex__')
+ cls = cls.base_type
+ all_members.sort(key=lambda e: e.name)
+
+ if inherited_reduce:
+ # This is not failsafe, as we may not know whether a cimported class defines a __reduce__.
+ # This is why we define __reduce_cython__ and only replace __reduce__
+ # (via ExtensionTypes.SetupReduce utility code) at runtime on class creation.
+ return
+
+ non_py = [
+ e for e in all_members
+ if not e.type.is_pyobject and (not e.type.can_coerce_to_pyobject(env)
+ or not e.type.can_coerce_from_pyobject(env))
+ ]
+
+ structs = [e for e in all_members if e.type.is_struct_or_union]
+
+ if cinit or non_py or (structs and not auto_pickle_forced):
+ if cinit:
+                # TODO(robertwb): We could allow this if __cinit__ has no required arguments.
+ msg = 'no default __reduce__ due to non-trivial __cinit__'
+ elif non_py:
+ msg = "%s cannot be converted to a Python object for pickling" % ','.join("self.%s" % e.name for e in non_py)
+ else:
+ # Extern structs may be only partially defined.
+ # TODO(robertwb): Limit the restriction to extern
+ # (and recursively extern-containing) structs.
+ msg = ("Pickling of struct members such as %s must be explicitly requested "
+ "with @auto_pickle(True)" % ','.join("self.%s" % e.name for e in structs))
+
+ if auto_pickle_forced:
+ error(node.pos, msg)
+
+ pickle_func = TreeFragment(u"""
+ def __reduce_cython__(self):
+ raise TypeError("%(msg)s")
+ def __setstate_cython__(self, __pyx_state):
+ raise TypeError("%(msg)s")
+ """ % {'msg': msg},
+ level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
+ pickle_func.analyse_declarations(node.scope)
+ self.visit(pickle_func)
+ node.body.stats.append(pickle_func)
+
+ else:
+ for e in all_members:
+ if not e.type.is_pyobject:
+ e.type.create_to_py_utility_code(env)
+ e.type.create_from_py_utility_code(env)
+ all_members_names = [e.name for e in all_members]
+ checksums = _calculate_pickle_checksums(all_members_names)
+
+ unpickle_func_name = '__pyx_unpickle_%s' % node.class_name
+
+ # TODO(robertwb): Move the state into the third argument
+ # so it can be pickled *after* self is memoized.
+ unpickle_func = TreeFragment(u"""
+ def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state):
+ cdef object __pyx_PickleError
+ cdef object __pyx_result
+ if __pyx_checksum not in %(checksums)s:
+ from pickle import PickleError as __pyx_PickleError
+ raise __pyx_PickleError("Incompatible checksums (0x%%x vs %(checksums)s = (%(members)s))" %% __pyx_checksum)
+ __pyx_result = %(class_name)s.__new__(__pyx_type)
+ if __pyx_state is not None:
+ %(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state)
+ return __pyx_result
+
+ cdef %(unpickle_func_name)s__set_state(%(class_name)s __pyx_result, tuple __pyx_state):
+ %(assignments)s
+ if len(__pyx_state) > %(num_members)d and hasattr(__pyx_result, '__dict__'):
+ __pyx_result.__dict__.update(__pyx_state[%(num_members)d])
+ """ % {
+ 'unpickle_func_name': unpickle_func_name,
+ 'checksums': "(%s)" % ', '.join(checksums),
+ 'members': ', '.join(all_members_names),
+ 'class_name': node.class_name,
+ 'assignments': '; '.join(
+ '__pyx_result.%s = __pyx_state[%s]' % (v, ix)
+ for ix, v in enumerate(all_members_names)),
+ 'num_members': len(all_members_names),
+ }, level='module', pipeline=[NormalizeTree(None)]).substitute({})
+ unpickle_func.analyse_declarations(node.entry.scope)
+ self.visit(unpickle_func)
+ self.extra_module_declarations.append(unpickle_func)
+
+ pickle_func = TreeFragment(u"""
+ def __reduce_cython__(self):
+ cdef tuple state
+ cdef object _dict
+ cdef bint use_setstate
+ state = (%(members)s)
+ _dict = getattr(self, '__dict__', None)
+ if _dict is not None:
+ state += (_dict,)
+ use_setstate = True
+ else:
+ use_setstate = %(any_notnone_members)s
+ if use_setstate:
+ return %(unpickle_func_name)s, (type(self), %(checksum)s, None), state
+ else:
+ return %(unpickle_func_name)s, (type(self), %(checksum)s, state)
+
+ def __setstate_cython__(self, __pyx_state):
+ %(unpickle_func_name)s__set_state(self, __pyx_state)
+ """ % {
+ 'unpickle_func_name': unpickle_func_name,
+ 'checksum': checksums[0],
+ 'members': ', '.join('self.%s' % v for v in all_members_names) + (',' if len(all_members_names) == 1 else ''),
+ # Even better, we could check PyType_IS_GC.
+            'any_notnone_members': ' or '.join(['self.%s is not None' % e.name for e in all_members if e.type.is_pyobject] or ['False']),
+ },
+ level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
+ pickle_func.analyse_declarations(node.scope)
+ self.enter_scope(node, node.scope) # functions should be visited in the class scope
+ self.visit(pickle_func)
+ self.exit_scope()
+ node.body.stats.append(pickle_func)
+
+ def _handle_fused_def_decorators(self, old_decorators, env, node):
+ """
+ Create function calls to the decorators and reassignments to
+ the function.
+ """
+ # Delete staticmethod and classmethod decorators, this is
+ # handled directly by the fused function object.
+ decorators = []
+ for decorator in old_decorators:
+ func = decorator.decorator
+ if (not func.is_name or
+ func.name not in ('staticmethod', 'classmethod') or
+ env.lookup_here(func.name)):
+ # not a static or classmethod
+ decorators.append(decorator)
+
+ if decorators:
+ transform = DecoratorTransform(self.context)
+ def_node = node.node
+ _, reassignments = transform.chain_decorators(
+ def_node, decorators, def_node.name)
+ reassignments.analyse_declarations(env)
+ node = [node, reassignments]
+
+ return node
+
+ def _handle_def(self, decorators, env, node):
+ "Handle def or cpdef fused functions"
+ # Create PyCFunction nodes for each specialization
+ node.stats.insert(0, node.py_func)
+ node.py_func = self.visit(node.py_func)
+ node.update_fused_defnode_entry(env)
+ pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func, binding=True)
+ pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
+ node.resulting_fused_function = pycfunc
+ # Create assignment node for our def function
+ node.fused_func_assignment = self._create_assignment(
+ node.py_func, ExprNodes.CloneNode(pycfunc), env)
+
+ if decorators:
+ node = self._handle_fused_def_decorators(decorators, env, node)
+
+ return node
+
+ def _create_fused_function(self, env, node):
+ "Create a fused function for a DefNode with fused arguments"
+ from . import FusedNode
+
+ if self.fused_function or self.in_lambda:
+ if self.fused_function not in self.fused_error_funcs:
+ if self.in_lambda:
+ error(node.pos, "Fused lambdas not allowed")
+ else:
+ error(node.pos, "Cannot nest fused functions")
+
+ self.fused_error_funcs.add(self.fused_function)
+
+ node.body = Nodes.PassStatNode(node.pos)
+ for arg in node.args:
+ if arg.type.is_fused:
+ arg.type = arg.type.get_fused_types()[0]
+
+ return node
+
+ decorators = getattr(node, 'decorators', None)
+ node = FusedNode.FusedCFuncDefNode(node, env)
+ self.fused_function = node
+ self.visitchildren(node)
+ self.fused_function = None
+ if node.py_func:
+ node = self._handle_def(decorators, env, node)
+
+ return node
+
+ def _handle_nogil_cleanup(self, lenv, node):
+ "Handle cleanup for 'with gil' blocks in nogil functions."
+ if lenv.nogil and lenv.has_with_gil_block:
+ # Acquire the GIL for cleanup in 'nogil' functions, by wrapping
+ # the entire function body in try/finally.
+ # The corresponding release will be taken care of by
+ # Nodes.FuncDefNode.generate_function_definitions()
+ node.body = Nodes.NogilTryFinallyStatNode(
+ node.body.pos,
+ body=node.body,
+ finally_clause=Nodes.EnsureGILNode(node.body.pos),
+ finally_except_clause=Nodes.EnsureGILNode(node.body.pos))
+
+ def _handle_fused(self, node):
+ if node.is_generator and node.has_fused_arguments:
+ node.has_fused_arguments = False
+ error(node.pos, "Fused generators not supported")
+ node.gbody = Nodes.StatListNode(node.pos,
+ stats=[],
+ body=Nodes.PassStatNode(node.pos))
+
+ return node.has_fused_arguments
+
+ def visit_FuncDefNode(self, node):
+ """
+ Analyse a function and its body, as that hasn't happened yet. Also
+ analyse the directive_locals set by @cython.locals().
+
+ Then, if we are a function with fused arguments, replace the function
+ (after it has declared itself in the symbol table!) with a
+ FusedCFuncDefNode, and analyse its children (which are in turn normal
+ functions). If we're a normal function, just analyse the body of the
+ function.
+ """
+ env = self.current_env()
+
+ self.seen_vars_stack.append(set())
+ lenv = node.local_scope
+ node.declare_arguments(lenv)
+
+ # @cython.locals(...)
+ for var, type_node in node.directive_locals.items():
+ if not lenv.lookup_here(var): # don't redeclare args
+ type = type_node.analyse_as_type(lenv)
+ if type:
+ lenv.declare_var(var, type, type_node.pos)
+ else:
+ error(type_node.pos, "Not a type")
+
+ if self._handle_fused(node):
+ node = self._create_fused_function(env, node)
+ else:
+ node.body.analyse_declarations(lenv)
+ self._handle_nogil_cleanup(lenv, node)
+ self._super_visit_FuncDefNode(node)
+
+ self.seen_vars_stack.pop()
+ return node
+
+ def visit_DefNode(self, node):
+ node = self.visit_FuncDefNode(node)
+ env = self.current_env()
+ if isinstance(node, Nodes.DefNode) and node.is_wrapper:
+ env = env.parent_scope
+ if (not isinstance(node, Nodes.DefNode) or
+ node.fused_py_func or node.is_generator_body or
+ not node.needs_assignment_synthesis(env)):
+ return node
+ return [node, self._synthesize_assignment(node, env)]
+
+ def visit_GeneratorBodyDefNode(self, node):
+ return self.visit_FuncDefNode(node)
+
+ def _synthesize_assignment(self, node, env):
+ # Synthesize assignment node and put it right after defnode
+ genv = env
+ while genv.is_py_class_scope or genv.is_c_class_scope:
+ genv = genv.outer_scope
+
+ if genv.is_closure_scope:
+ rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
+ node.pos, def_node=node,
+ pymethdef_cname=node.entry.pymethdef_cname,
+ code_object=ExprNodes.CodeObjectNode(node))
+ else:
+ binding = self.current_directives.get('binding')
+ rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
+ node.code_object = rhs.code_object
+ if node.is_generator:
+ node.gbody.code_object = node.code_object
+
+ if env.is_py_class_scope:
+ rhs.binding = True
+
+ node.is_cyfunction = rhs.binding
+ return self._create_assignment(node, rhs, env)
+
+ def _create_assignment(self, def_node, rhs, env):
+ if def_node.decorators:
+ for decorator in def_node.decorators[::-1]:
+ rhs = ExprNodes.SimpleCallNode(
+ decorator.pos,
+ function = decorator.decorator,
+ args = [rhs])
+ def_node.decorators = None
+
+ assmt = Nodes.SingleAssignmentNode(
+ def_node.pos,
+ lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
+ rhs=rhs)
+ assmt.analyse_declarations(env)
+ return assmt
+
+ def visit_ScopedExprNode(self, node):
+ env = self.current_env()
+ node.analyse_declarations(env)
+ # the node may or may not have a local scope
+ if node.has_local_scope:
+ self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
+ self.enter_scope(node, node.expr_scope)
+ node.analyse_scoped_declarations(node.expr_scope)
+ self.visitchildren(node)
+ self.exit_scope()
+ self.seen_vars_stack.pop()
+ else:
+ node.analyse_scoped_declarations(env)
+ self.visitchildren(node)
+ return node
+
+ def visit_TempResultFromStatNode(self, node):
+ self.visitchildren(node)
+ node.analyse_declarations(self.current_env())
+ return node
+
+ def visit_CppClassNode(self, node):
+ if node.visibility == 'extern':
+ return None
+ else:
+ return self.visit_ClassDefNode(node)
+
+ def visit_CStructOrUnionDefNode(self, node):
+ # Create a wrapper node if needed.
+ # We want to use the struct type information (so it can't happen
+ # before this phase) but also create new objects to be declared
+ # (so it can't happen later).
+ # Note that we don't return the original node, as it is
+ # never used after this phase.
+        if True:  # private (default): the wrapper generation below is currently disabled
+            return None
+
+ self_value = ExprNodes.AttributeNode(
+ pos = node.pos,
+ obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
+ attribute = EncodedString(u"value"))
+ var_entries = node.entry.type.scope.var_entries
+ attributes = []
+ for entry in var_entries:
+ attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
+ obj = self_value,
+ attribute = entry.name))
+ # __init__ assignments
+ init_assignments = []
+ for entry, attr in zip(var_entries, attributes):
+ # TODO: branch on visibility
+ init_assignments.append(self.init_assignment.substitute({
+ u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
+ u"ATTR": attr,
+ }, pos = entry.pos))
+
+ # create the class
+ str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
+ wrapper_class = self.struct_or_union_wrapper.substitute({
+ u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
+ u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
+ u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
+ u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
+ u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
+ }, pos = node.pos).stats[0]
+ wrapper_class.class_name = node.name
+ wrapper_class.shadow = True
+ class_body = wrapper_class.body.stats
+
+ # fix value type
+ assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
+ class_body[0].base_type.name = node.name
+
+ # fix __init__ arguments
+ init_method = class_body[1]
+ assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
+ arg_template = init_method.args[1]
+ if not node.entry.type.is_struct:
+ arg_template.kw_only = True
+ del init_method.args[1]
+ for entry, attr in zip(var_entries, attributes):
+ arg = copy.deepcopy(arg_template)
+ arg.declarator.name = entry.name
+ init_method.args.append(arg)
+
+ # setters/getters
+ for entry, attr in zip(var_entries, attributes):
+ # TODO: branch on visibility
+ if entry.type.is_pyobject:
+ template = self.basic_pyobject_property
+ else:
+ template = self.basic_property
+ property = template.substitute({
+ u"ATTR": attr,
+ }, pos = entry.pos).stats[0]
+ property.name = entry.name
+ wrapper_class.body.stats.append(property)
+
+ wrapper_class.analyse_declarations(self.current_env())
+ return self.visit_CClassDefNode(wrapper_class)
+
+ # Some nodes are no longer needed after declaration
+ # analysis and can be dropped. The analysis was performed
+ # on these nodes in a separate recursive process from the
+ # enclosing function or module, so we can simply drop them.
+ def visit_CDeclaratorNode(self, node):
+ # necessary to ensure that all CNameDeclaratorNodes are visited.
+ self.visitchildren(node)
+ return node
+
+ def visit_CTypeDefNode(self, node):
+ return node
+
+ def visit_CBaseTypeNode(self, node):
+ return None
+
+ def visit_CEnumDefNode(self, node):
+ if node.visibility == 'public':
+ return node
+ else:
+ return None
+
+ def visit_CNameDeclaratorNode(self, node):
+ if node.name in self.seen_vars_stack[-1]:
+ entry = self.current_env().lookup(node.name)
+ if (entry is None or entry.visibility != 'extern'
+ and not entry.scope.is_c_class_scope):
+ warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
+ self.visitchildren(node)
+ return node
+
+ def visit_CVarDefNode(self, node):
+ # to ensure all CNameDeclaratorNodes are visited.
+ self.visitchildren(node)
+ return None
+
+ def visit_CnameDecoratorNode(self, node):
+ child_node = self.visit(node.node)
+ if not child_node:
+ return None
+ if type(child_node) is list: # Assignment synthesized
+ node.child_node = child_node[0]
+ return [node] + child_node[1:]
+ node.node = child_node
+ return node
+
+ def create_Property(self, entry):
+ if entry.visibility == 'public':
+ if entry.type.is_pyobject:
+ template = self.basic_pyobject_property
+ else:
+ template = self.basic_property
+ elif entry.visibility == 'readonly':
+ template = self.basic_property_ro
+ property = template.substitute({
+ u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
+ obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
+ attribute=entry.name),
+ }, pos=entry.pos).stats[0]
+ property.name = entry.name
+ property.doc = entry.doc
+ return property
+
+
+def _calculate_pickle_checksums(member_names):
+ # Cython 0.x used MD5 for the checksum, which a few Python installations remove for security reasons.
+ # SHA-256 should be ok for years to come, but early Cython 3.0 alpha releases used SHA-1,
+ # which may not be.
+ member_names_string = ' '.join(member_names).encode('utf-8')
+ hash_kwargs = {'usedforsecurity': False} if sys.version_info >= (3, 9) else {}
+ checksums = []
+ for algo_name in ['md5', 'sha256', 'sha1']:
+ try:
+ mkchecksum = getattr(hashlib, algo_name)
+ checksum = mkchecksum(member_names_string, **hash_kwargs).hexdigest()
+ except (AttributeError, ValueError):
+ # The algorithm (i.e. MD5) might not be there at all, or might be blocked at runtime.
+ continue
+ checksums.append('0x' + checksum[:7])
+ return checksums
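+
+# Sketch of the result: for member names ['a', 'b'] the digests are taken
+# over b'a b', and each available algorithm contributes one short id of
+# the form '0x' + the first 7 hex digits, all of which are accepted by
+# the generated unpickle function.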
+
+
+class CalculateQualifiedNamesTransform(EnvTransform):
+ """
+ Calculate and store the '__qualname__' and the global
+ module name on some nodes.
+ """
+ def visit_ModuleNode(self, node):
+ self.module_name = self.global_scope().qualified_name
+ self.qualified_name = []
+ _super = super(CalculateQualifiedNamesTransform, self)
+ self._super_visit_FuncDefNode = _super.visit_FuncDefNode
+ self._super_visit_ClassDefNode = _super.visit_ClassDefNode
+ self.visitchildren(node)
+ return node
+
+ def _set_qualname(self, node, name=None):
+ if name:
+ qualname = self.qualified_name[:]
+ qualname.append(name)
+ else:
+ qualname = self.qualified_name
+ node.qualname = EncodedString('.'.join(qualname))
+ node.module_name = self.module_name
+
+ def _append_entry(self, entry):
+ if entry.is_pyglobal and not entry.is_pyclass_attr:
+ self.qualified_name = [entry.name]
+ else:
+ self.qualified_name.append(entry.name)
+
+ def visit_ClassNode(self, node):
+ self._set_qualname(node, node.name)
+ self.visitchildren(node)
+ return node
+
+ def visit_PyClassNamespaceNode(self, node):
+ # class name was already added by parent node
+ self._set_qualname(node)
+ self.visitchildren(node)
+ return node
+
+ def visit_PyCFunctionNode(self, node):
+ orig_qualified_name = self.qualified_name[:]
+ if node.def_node.is_wrapper and self.qualified_name and self.qualified_name[-1] == '<locals>':
+ self.qualified_name.pop()
+ self._set_qualname(node)
+ else:
+ self._set_qualname(node, node.def_node.name)
+ self.visitchildren(node)
+ self.qualified_name = orig_qualified_name
+ return node
+
+ def visit_DefNode(self, node):
+ if node.is_wrapper and self.qualified_name:
+ assert self.qualified_name[-1] == '<locals>', self.qualified_name
+ orig_qualified_name = self.qualified_name[:]
+ self.qualified_name.pop()
+ self._set_qualname(node)
+ self._super_visit_FuncDefNode(node)
+ self.qualified_name = orig_qualified_name
+ else:
+ self._set_qualname(node, node.name)
+ self.visit_FuncDefNode(node)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ orig_qualified_name = self.qualified_name[:]
+ if getattr(node, 'name', None) == '<lambda>':
+ self.qualified_name.append('<lambda>')
+ else:
+ self._append_entry(node.entry)
+ self.qualified_name.append('<locals>')
+ self._super_visit_FuncDefNode(node)
+ self.qualified_name = orig_qualified_name
+ return node
+
+ def visit_ClassDefNode(self, node):
+ orig_qualified_name = self.qualified_name[:]
+ entry = (getattr(node, 'entry', None) or # PyClass
+ self.current_env().lookup_here(node.name)) # CClass
+ self._append_entry(entry)
+ self._super_visit_ClassDefNode(node)
+ self.qualified_name = orig_qualified_name
+ return node
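+
+    # Example: for
+    #     class C:
+    #         def f(self):
+    #             def g(): pass
+    # the inner function ends up with the qualname "C.f.<locals>.g".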
+
+
+class AnalyseExpressionsTransform(CythonTransform):
+
+ def visit_ModuleNode(self, node):
+ node.scope.infer_types()
+ node.body = node.body.analyse_expressions(node.scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ node.local_scope.infer_types()
+ node.body = node.body.analyse_expressions(node.local_scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_ScopedExprNode(self, node):
+ if node.has_local_scope:
+ node.expr_scope.infer_types()
+ node = node.analyse_scoped_expressions(node.expr_scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_IndexNode(self, node):
+ """
+ Replace index nodes used to specialize cdef functions with fused
+ argument types with the Attribute- or NameNode referring to the
+ function. We then need to copy over the specialization properties to
+ the attribute or name node.
+
+ Because the indexing might be a Python indexing operation on a fused
+ function, or (usually) a Cython indexing operation, we need to
+ re-analyse the types.
+ """
+ self.visit_Node(node)
+ if node.is_fused_index and not node.type.is_error:
+ node = node.base
+ return node
+
+
+class FindInvalidUseOfFusedTypes(CythonTransform):
+
+ def visit_FuncDefNode(self, node):
+ # Errors related to use in functions with fused args will already
+ # have been detected
+ if not node.has_fused_arguments:
+ if not node.is_generator_body and node.return_type.is_fused:
+ error(node.pos, "Return type is not specified as argument type")
+ else:
+ self.visitchildren(node)
+
+ return node
+
+ def visit_ExprNode(self, node):
+ if node.type and node.type.is_fused:
+ error(node.pos, "Invalid use of fused types, type cannot be specialized")
+ else:
+ self.visitchildren(node)
+
+ return node
+
+
+class ExpandInplaceOperators(EnvTransform):
+
+ def visit_InPlaceAssignmentNode(self, node):
+ lhs = node.lhs
+ rhs = node.rhs
+ if lhs.type.is_cpp_class:
+ # No getting around this exact operator here.
+ return node
+ if isinstance(lhs, ExprNodes.BufferIndexNode):
+ # There is code to handle this case in InPlaceAssignmentNode
+ return node
+
+ env = self.current_env()
+ def side_effect_free_reference(node, setting=False):
+ if node.is_name:
+ return node, []
+ elif node.type.is_pyobject and not setting:
+ node = LetRefNode(node)
+ return node, [node]
+ elif node.is_subscript:
+ base, temps = side_effect_free_reference(node.base)
+ index = LetRefNode(node.index)
+ return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
+ elif node.is_attribute:
+ obj, temps = side_effect_free_reference(node.obj)
+ return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
+ elif isinstance(node, ExprNodes.BufferIndexNode):
+ raise ValueError("Don't allow things like attributes of buffer indexing operations")
+ else:
+ node = LetRefNode(node)
+ return node, [node]
+ try:
+ lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
+ except ValueError:
+ return node
+ dup = lhs.__class__(**lhs.__dict__)
+ binop = ExprNodes.binop_node(node.pos,
+ operator = node.operator,
+ operand1 = dup,
+ operand2 = rhs,
+ inplace=True)
+ # Manually analyse types for new node.
+ lhs.analyse_target_types(env)
+ dup.analyse_types(env)
+ binop.analyse_operation(env)
+ node = Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs = lhs,
+ rhs=binop.coerce_to(lhs.type, env))
+ # Use LetRefNode to avoid side effects.
+ let_ref_nodes.reverse()
+ for t in let_ref_nodes:
+ node = LetNode(t, node)
+ return node
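+
+    # Sketch of the expansion above: "x[f()] += 1" becomes, roughly,
+    #     let idx = f():
+    #         x[idx] = x[idx] + 1
+    # so that the index expression is evaluated exactly once.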
+
+ def visit_ExprNode(self, node):
+ # In-place assignments can't happen within an expression.
+ return node
+
+
+class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
+ """
+ Adjust function and class definitions by the decorator directives:
+
+ @cython.cfunc
+ @cython.cclass
+ @cython.ccall
+ @cython.inline
+ @cython.nogil
+ """
+
+ def visit_ModuleNode(self, node):
+ self.directives = node.directives
+ self.in_py_class = False
+ self.visitchildren(node)
+ return node
+
+ def visit_CompilerDirectivesNode(self, node):
+ old_directives = self.directives
+ self.directives = node.directives
+ self.visitchildren(node)
+ self.directives = old_directives
+ return node
+
+ def visit_DefNode(self, node):
+ modifiers = []
+ if 'inline' in self.directives:
+ modifiers.append('inline')
+ nogil = self.directives.get('nogil')
+ except_val = self.directives.get('exceptval')
+ return_type_node = self.directives.get('returns')
+ if return_type_node is None and self.directives['annotation_typing']:
+ return_type_node = node.return_type_annotation
+        # for Python annotations, prefer safe exception handling by default
+ if return_type_node is not None and except_val is None:
+ except_val = (None, True) # except *
+ elif except_val is None:
+ # backward compatible default: no exception check
+ except_val = (None, False)
+ if 'ccall' in self.directives:
+ node = node.as_cfunction(
+ overridable=True, modifiers=modifiers, nogil=nogil,
+ returns=return_type_node, except_val=except_val)
+ return self.visit(node)
+ if 'cfunc' in self.directives:
+ if self.in_py_class:
+ error(node.pos, "cfunc directive is not allowed here")
+ else:
+ node = node.as_cfunction(
+ overridable=False, modifiers=modifiers, nogil=nogil,
+ returns=return_type_node, except_val=except_val)
+ return self.visit(node)
+ if 'inline' in modifiers:
+ error(node.pos, "Python functions cannot be declared 'inline'")
+ if nogil:
+ # TODO: turn this into a "with gil" declaration.
+ error(node.pos, "Python functions cannot be declared 'nogil'")
+ self.visitchildren(node)
+ return node
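+
+    # Example: under "@cython.cfunc", a plain
+    #     def f(x): ...
+    # is converted into a cdef function, while "@cython.ccall" produces
+    # the overridable (cpdef) variant instead.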
+
+ def visit_LambdaNode(self, node):
+ # No directives should modify lambdas or generator expressions (and also nothing in them).
+ return node
+
+ def visit_PyClassDefNode(self, node):
+ if 'cclass' in self.directives:
+ node = node.as_cclass()
+ return self.visit(node)
+ else:
+ old_in_pyclass = self.in_py_class
+ self.in_py_class = True
+ self.visitchildren(node)
+ self.in_py_class = old_in_pyclass
+ return node
+
+ def visit_CClassDefNode(self, node):
+ old_in_pyclass = self.in_py_class
+ self.in_py_class = False
+ self.visitchildren(node)
+ self.in_py_class = old_in_pyclass
+ return node
+
+
+class AlignFunctionDefinitions(CythonTransform):
+ """
+ This class takes the signatures from a .pxd file and applies them to
+ the def methods in a .py file.
+ """
+
+ def visit_ModuleNode(self, node):
+ self.scope = node.scope
+ self.directives = node.directives
+ self.imported_names = set() # hack, see visit_FromImportStatNode()
+ self.visitchildren(node)
+ return node
+
+ def visit_PyClassDefNode(self, node):
+ pxd_def = self.scope.lookup(node.name)
+ if pxd_def:
+ if pxd_def.is_cclass:
+ return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
+ elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
+ error(node.pos, "'%s' redeclared" % node.name)
+ if pxd_def.pos:
+ error(pxd_def.pos, "previous declaration here")
+ return None
+ return node
+
+ def visit_CClassDefNode(self, node, pxd_def=None):
+ if pxd_def is None:
+ pxd_def = self.scope.lookup(node.class_name)
+ if pxd_def:
+ if not pxd_def.defined_in_pxd:
+ return node
+ outer_scope = self.scope
+ self.scope = pxd_def.type.scope
+ self.visitchildren(node)
+ if pxd_def:
+ self.scope = outer_scope
+ return node
+
+ def visit_DefNode(self, node):
+ pxd_def = self.scope.lookup(node.name)
+ if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
+ if not pxd_def.is_cfunction:
+ error(node.pos, "'%s' redeclared" % node.name)
+ if pxd_def.pos:
+ error(pxd_def.pos, "previous declaration here")
+ return None
+ node = node.as_cfunction(pxd_def)
+ elif (self.scope.is_module_scope and self.directives['auto_cpdef']
+ and not node.name in self.imported_names
+ and node.is_cdef_func_compatible()):
+ # FIXME: cpdef-ing should be done in analyse_declarations()
+ node = node.as_cfunction(scope=self.scope)
+ # Enable this when nested cdef functions are allowed.
+ # self.visitchildren(node)
+ return node
+
+ def visit_FromImportStatNode(self, node):
+ # hack to prevent conditional import fallback functions from
+        # being cpdef-ed (global Python variables currently conflict
+ # with imports)
+ if self.scope.is_module_scope:
+ for name, _ in node.items:
+ self.imported_names.add(name)
+ return node
+
+ def visit_ExprNode(self, node):
+ # ignore lambdas and everything else that appears in expressions
+ return node
+
+
+class RemoveUnreachableCode(CythonTransform):
+ def visit_StatListNode(self, node):
+ if not self.current_directives['remove_unreachable']:
+ return node
+ self.visitchildren(node)
+        for idx, stat in enumerate(node.stats, 1):
+ if stat.is_terminator:
+ if idx < len(node.stats):
+ if self.current_directives['warn.unreachable']:
+ warning(node.stats[idx].pos, "Unreachable code", 2)
+ node.stats = node.stats[:idx]
+ node.is_terminator = True
+ break
+ return node
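+
+    # Example: in a statement list like
+    #     return x
+    #     print("never reached")
+    # everything after the terminator is dropped, with an "Unreachable
+    # code" warning when 'warn.unreachable' is enabled.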
+
+ def visit_IfClauseNode(self, node):
+ self.visitchildren(node)
+ if node.body.is_terminator:
+ node.is_terminator = True
+ return node
+
+ def visit_IfStatNode(self, node):
+ self.visitchildren(node)
+ if node.else_clause and node.else_clause.is_terminator:
+ for clause in node.if_clauses:
+ if not clause.is_terminator:
+ break
+ else:
+ node.is_terminator = True
+ return node
+
+ def visit_TryExceptStatNode(self, node):
+ self.visitchildren(node)
+ if node.body.is_terminator and node.else_clause:
+ if self.current_directives['warn.unreachable']:
+ warning(node.else_clause.pos, "Unreachable code", 2)
+ node.else_clause = None
+ return node
+
+ def visit_TryFinallyStatNode(self, node):
+ self.visitchildren(node)
+ if node.finally_clause.is_terminator:
+ node.is_terminator = True
+ return node
+
+
+class YieldNodeCollector(TreeVisitor):
+
+ def __init__(self):
+ super(YieldNodeCollector, self).__init__()
+ self.yields = []
+ self.returns = []
+ self.finallys = []
+ self.excepts = []
+ self.has_return_value = False
+ self.has_yield = False
+ self.has_await = False
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+
+ def visit_YieldExprNode(self, node):
+ self.yields.append(node)
+ self.has_yield = True
+ self.visitchildren(node)
+
+ def visit_AwaitExprNode(self, node):
+ self.yields.append(node)
+ self.has_await = True
+ self.visitchildren(node)
+
+ def visit_ReturnStatNode(self, node):
+ self.visitchildren(node)
+ if node.value:
+ self.has_return_value = True
+ self.returns.append(node)
+
+ def visit_TryFinallyStatNode(self, node):
+ self.visitchildren(node)
+ self.finallys.append(node)
+
+ def visit_TryExceptStatNode(self, node):
+ self.visitchildren(node)
+ self.excepts.append(node)
+
+ def visit_ClassDefNode(self, node):
+ pass
+
+ def visit_FuncDefNode(self, node):
+ pass
+
+ def visit_LambdaNode(self, node):
+ pass
+
+ def visit_GeneratorExpressionNode(self, node):
+ pass
+
+ def visit_CArgDeclNode(self, node):
+ # do not look into annotations
+ # FIXME: support (yield) in default arguments (currently crashes)
+ pass
+
+
+class MarkClosureVisitor(CythonTransform):
+
+ def visit_ModuleNode(self, node):
+ self.needs_closure = False
+ self.visitchildren(node)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ self.needs_closure = False
+ self.visitchildren(node)
+ node.needs_closure = self.needs_closure
+ self.needs_closure = True
+
+ collector = YieldNodeCollector()
+ collector.visitchildren(node)
+
+ if node.is_async_def:
+ coroutine_type = Nodes.AsyncDefNode
+ if collector.has_yield:
+ coroutine_type = Nodes.AsyncGenNode
+ for yield_expr in collector.yields + collector.returns:
+ yield_expr.in_async_gen = True
+ elif self.current_directives['iterable_coroutine']:
+ coroutine_type = Nodes.IterableAsyncDefNode
+ elif collector.has_await:
+ found = next(y for y in collector.yields if y.is_await)
+ error(found.pos, "'await' not allowed in generators (use 'yield')")
+ return node
+ elif collector.has_yield:
+ coroutine_type = Nodes.GeneratorDefNode
+ else:
+ return node
+
+ for i, yield_expr in enumerate(collector.yields, 1):
+ yield_expr.label_num = i
+ for retnode in collector.returns + collector.finallys + collector.excepts:
+ retnode.in_generator = True
+
+ gbody = Nodes.GeneratorBodyDefNode(
+ pos=node.pos, name=node.name, body=node.body,
+ is_async_gen_body=node.is_async_def and collector.has_yield)
+ coroutine = coroutine_type(
+ pos=node.pos, name=node.name, args=node.args,
+ star_arg=node.star_arg, starstar_arg=node.starstar_arg,
+ doc=node.doc, decorators=node.decorators,
+ gbody=gbody, lambda_name=node.lambda_name,
+ return_type_annotation=node.return_type_annotation)
+ return coroutine
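+
+    # Example: "def f(): yield 1" is replaced by a GeneratorDefNode whose
+    # gbody wraps the original body; "async def" becomes an AsyncDefNode,
+    # or an AsyncGenNode if it also contains a yield.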
+
+ def visit_CFuncDefNode(self, node):
+ self.needs_closure = False
+ self.visitchildren(node)
+ node.needs_closure = self.needs_closure
+ self.needs_closure = True
+ if node.needs_closure and node.overridable:
+ error(node.pos, "closures inside cpdef functions not yet supported")
+ return node
+
+ def visit_LambdaNode(self, node):
+ self.needs_closure = False
+ self.visitchildren(node)
+ node.needs_closure = self.needs_closure
+ self.needs_closure = True
+ return node
+
+ def visit_ClassDefNode(self, node):
+ self.visitchildren(node)
+ self.needs_closure = True
+ return node
+
+
+class CreateClosureClasses(CythonTransform):
+ # Output closure classes in module scope for all functions
+ # that really need it.
+
+ def __init__(self, context):
+ super(CreateClosureClasses, self).__init__(context)
+ self.path = []
+ self.in_lambda = False
+
+ def visit_ModuleNode(self, node):
+ self.module_scope = node.scope
+ self.visitchildren(node)
+ return node
+
+ def find_entries_used_in_closures(self, node):
+ from_closure = []
+ in_closure = []
+ for scope in node.local_scope.iter_local_scopes():
+ for name, entry in scope.entries.items():
+ if not name:
+ continue
+ if entry.from_closure:
+ from_closure.append((name, entry))
+ elif entry.in_closure:
+ in_closure.append((name, entry))
+ return from_closure, in_closure
+
+ def create_class_from_scope(self, node, target_module_scope, inner_node=None):
+ # move local variables into closure
+ if node.is_generator:
+ for scope in node.local_scope.iter_local_scopes():
+ for entry in scope.entries.values():
+ if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal):
+ entry.in_closure = True
+
+ from_closure, in_closure = self.find_entries_used_in_closures(node)
+ in_closure.sort()
+
+        # Reset the closure flags and recompute them from scratch below.
+ node.needs_closure = False
+ node.needs_outer_scope = False
+
+ func_scope = node.local_scope
+ cscope = node.entry.scope
+ while cscope.is_py_class_scope or cscope.is_c_class_scope:
+ cscope = cscope.outer_scope
+
+ if not from_closure and (self.path or inner_node):
+ if not inner_node:
+ if not node.py_cfunc_node:
+ raise InternalError("DefNode does not have assignment node")
+ inner_node = node.py_cfunc_node
+ inner_node.needs_self_code = False
+ node.needs_outer_scope = False
+
+ if node.is_generator:
+ pass
+ elif not in_closure and not from_closure:
+ return
+ elif not in_closure:
+ func_scope.is_passthrough = True
+ func_scope.scope_class = cscope.scope_class
+ node.needs_outer_scope = True
+ return
+
+        # entry.cname can contain periods (e.g. a derived C method of a class).
+ # We want to use the cname as part of a C struct name, so we replace
+ # periods with double underscores.
+ as_name = '%s_%s' % (
+ target_module_scope.next_id(Naming.closure_class_prefix),
+ node.entry.cname.replace('.','__'))
+
+ entry = target_module_scope.declare_c_class(
+ name=as_name, pos=node.pos, defining=True,
+ implementing=True)
+ entry.type.is_final_type = True
+
+ func_scope.scope_class = entry
+ class_scope = entry.type.scope
+ class_scope.is_internal = True
+ class_scope.is_closure_class_scope = True
+ if node.is_async_def or node.is_generator:
+ # Generators need their closure intact during cleanup as they resume to handle GeneratorExit
+ class_scope.directives['no_gc_clear'] = True
+ if Options.closure_freelist_size:
+ class_scope.directives['freelist'] = Options.closure_freelist_size
+
+ if from_closure:
+ assert cscope.is_closure_scope
+ class_scope.declare_var(pos=node.pos,
+ name=Naming.outer_scope_cname,
+ cname=Naming.outer_scope_cname,
+ type=cscope.scope_class.type,
+ is_cdef=True)
+ node.needs_outer_scope = True
+ for name, entry in in_closure:
+ closure_entry = class_scope.declare_var(
+ pos=entry.pos,
+ name=entry.name if not entry.in_subscope else None,
+ cname=entry.cname,
+ type=entry.type,
+ is_cdef=True)
+ if entry.is_declared_generic:
+ closure_entry.is_declared_generic = 1
+ node.needs_closure = True
+ # Do it here because other classes are already checked
+ target_module_scope.check_c_class(func_scope.scope_class)
+
+ def visit_LambdaNode(self, node):
+ if not isinstance(node.def_node, Nodes.DefNode):
+ # fused function, an error has been previously issued
+ return node
+
+ was_in_lambda = self.in_lambda
+ self.in_lambda = True
+ self.create_class_from_scope(node.def_node, self.module_scope, node)
+ self.visitchildren(node)
+ self.in_lambda = was_in_lambda
+ return node
+
+ def visit_FuncDefNode(self, node):
+ if self.in_lambda:
+ self.visitchildren(node)
+ return node
+ if node.needs_closure or self.path:
+ self.create_class_from_scope(node, self.module_scope)
+ self.path.append(node)
+ self.visitchildren(node)
+ self.path.pop()
+ return node
+
+ def visit_GeneratorBodyDefNode(self, node):
+ self.visitchildren(node)
+ return node
+
+ def visit_CFuncDefNode(self, node):
+ if not node.overridable:
+ return self.visit_FuncDefNode(node)
+ else:
+ self.visitchildren(node)
+ return node
+
+
+class InjectGilHandling(VisitorTransform, SkipDeclarations):
+ """
+ Allow certain Python operations inside of nogil blocks by implicitly acquiring the GIL.
+
+ Must run before the AnalyseDeclarationsTransform to make sure the GILStatNodes get
+ set up, parallel sections know that the GIL is acquired inside of them, etc.
+ """
+ def __call__(self, root):
+ self.nogil = False
+ return super(InjectGilHandling, self).__call__(root)
+
+ # special node handling
+
+ def visit_RaiseStatNode(self, node):
+ """Allow raising exceptions in nogil sections by wrapping them in a 'with gil' block."""
+ if self.nogil:
+ node = Nodes.GILStatNode(node.pos, state='gil', body=node)
+ return node
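+
+    # Example: inside a "with nogil:" block, a bare
+    #     raise ValueError("...")
+    # is rewritten as if it read: with gil: raise ValueError("...").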
+
+ # further candidates:
+ # def visit_AssertStatNode(self, node):
+ # def visit_ReraiseStatNode(self, node):
+
+ # nogil tracking
+
+ def visit_GILStatNode(self, node):
+ was_nogil = self.nogil
+ self.nogil = (node.state == 'nogil')
+ self.visitchildren(node)
+ self.nogil = was_nogil
+ return node
+
+ def visit_CFuncDefNode(self, node):
+ was_nogil = self.nogil
+ if isinstance(node.declarator, Nodes.CFuncDeclaratorNode):
+ self.nogil = node.declarator.nogil and not node.declarator.with_gil
+ self.visitchildren(node)
+ self.nogil = was_nogil
+ return node
+
+ def visit_ParallelRangeNode(self, node):
+ was_nogil = self.nogil
+ self.nogil = node.nogil
+ self.visitchildren(node)
+ self.nogil = was_nogil
+ return node
+
+ def visit_ExprNode(self, node):
+ # No special GIL handling inside of expressions for now.
+ return node
+
+ visit_Node = VisitorTransform.recurse_to_children
+
+
+class GilCheck(VisitorTransform):
+ """
+ Call `node.gil_check(env)` on each node to make sure we hold the
+    GIL when we need it. Raise an error on Python operations inside
+    a `nogil` environment.
+
+ Additionally, raise exceptions for closely nested with gil or with nogil
+ statements. The latter would abort Python.
+ """
+
+ def __call__(self, root):
+ self.env_stack = [root.scope]
+ self.nogil = False
+
+ # True for 'cdef func() nogil:' functions, as the GIL may be held while
+ # calling this function (thus contained 'nogil' blocks may be valid).
+ self.nogil_declarator_only = False
+ return super(GilCheck, self).__call__(root)
+
+ def _visit_scoped_children(self, node, gil_state):
+ was_nogil = self.nogil
+ outer_attrs = node.outer_attrs
+ if outer_attrs and len(self.env_stack) > 1:
+ self.nogil = self.env_stack[-2].nogil
+ self.visitchildren(node, outer_attrs)
+
+ self.nogil = gil_state
+ self.visitchildren(node, attrs=None, exclude=outer_attrs)
+ self.nogil = was_nogil
+
+ def visit_FuncDefNode(self, node):
+ self.env_stack.append(node.local_scope)
+ inner_nogil = node.local_scope.nogil
+
+ if inner_nogil:
+ self.nogil_declarator_only = True
+
+ if inner_nogil and node.nogil_check:
+ node.nogil_check(node.local_scope)
+
+ self._visit_scoped_children(node, inner_nogil)
+
+ # This cannot be nested, so it doesn't need backup/restore
+ self.nogil_declarator_only = False
+
+ self.env_stack.pop()
+ return node
+
+ def visit_GILStatNode(self, node):
+ if self.nogil and node.nogil_check:
+ node.nogil_check()
+
+ was_nogil = self.nogil
+ is_nogil = (node.state == 'nogil')
+
+ if was_nogil == is_nogil and not self.nogil_declarator_only:
+ if not was_nogil:
+ error(node.pos, "Trying to acquire the GIL while it is "
+ "already held.")
+ else:
+ error(node.pos, "Trying to release the GIL while it was "
+ "previously released.")
+
+ if isinstance(node.finally_clause, Nodes.StatListNode):
+ # The finally clause of the GILStatNode is a GILExitNode,
+ # which is wrapped in a StatListNode. Just unpack that.
+ node.finally_clause, = node.finally_clause.stats
+
+ self._visit_scoped_children(node, is_nogil)
+ return node
+
+ def visit_ParallelRangeNode(self, node):
+ if node.nogil:
+ node.nogil = False
+ node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
+ return self.visit_GILStatNode(node)
+
+ if not self.nogil:
+ error(node.pos, "prange() can only be used without the GIL")
+ # Forget about any GIL-related errors that may occur in the body
+ return None
+
+ node.nogil_check(self.env_stack[-1])
+ self.visitchildren(node)
+ return node
+
+ def visit_ParallelWithBlockNode(self, node):
+ if not self.nogil:
+ error(node.pos, "The parallel section may only be used without "
+ "the GIL")
+ return None
+
+ if node.nogil_check:
+            # The node does not currently implement nogil_check, but test
+            # for it anyway to avoid surprises should that ever change
+ node.nogil_check(self.env_stack[-1])
+
+ self.visitchildren(node)
+ return node
+
+ def visit_TryFinallyStatNode(self, node):
+ """
+ Take care of try/finally statements in nogil code sections.
+ """
+ if not self.nogil or isinstance(node, Nodes.GILStatNode):
+ return self.visit_Node(node)
+
+ node.nogil_check = None
+ node.is_try_finally_in_nogil = True
+ self.visitchildren(node)
+ return node
+
+ def visit_Node(self, node):
+ if self.env_stack and self.nogil and node.nogil_check:
+ node.nogil_check(self.env_stack[-1])
+ if node.outer_attrs:
+ self._visit_scoped_children(node, self.nogil)
+ else:
+ self.visitchildren(node)
+ if self.nogil:
+ node.in_nogil_context = True
+ return node
+
+
+class TransformBuiltinMethods(EnvTransform):
+ """
+    Replace Cython's own cython.* builtins with the corresponding tree nodes.
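+
+    For example (illustrative): `cython.sizeof(int)` becomes a SizeofTypeNode,
+    `cython.cast(int, x)` becomes a TypecastNode, and a bare `locals()` call
+    becomes a LocalsExprNode.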
+ """
+
+ def visit_SingleAssignmentNode(self, node):
+ if node.declaration_only:
+ return None
+ else:
+ self.visitchildren(node)
+ return node
+
+ def visit_AttributeNode(self, node):
+ self.visitchildren(node)
+ return self.visit_cython_attribute(node)
+
+ def visit_NameNode(self, node):
+ return self.visit_cython_attribute(node)
+
+ def visit_cython_attribute(self, node):
+ attribute = node.as_cython_attribute()
+ if attribute:
+ if attribute == u'compiled':
+ node = ExprNodes.BoolNode(node.pos, value=True)
+ elif attribute == u'__version__':
+ from .. import __version__ as version
+ node = ExprNodes.StringNode(node.pos, value=EncodedString(version))
+ elif attribute == u'NULL':
+ node = ExprNodes.NullNode(node.pos)
+ elif attribute in (u'set', u'frozenset', u'staticmethod'):
+ node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
+ entry=self.current_env().builtin_scope().lookup_here(attribute))
+ elif PyrexTypes.parse_basic_type(attribute):
+ pass
+ elif self.context.cython_scope.lookup_qualified_name(attribute):
+ pass
+ else:
+ error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
+ return node
+
+ def visit_ExecStatNode(self, node):
+ lenv = self.current_env()
+ self.visitchildren(node)
+ if len(node.args) == 1:
+ node.args.append(ExprNodes.GlobalsExprNode(node.pos))
+ if not lenv.is_module_scope:
+ node.args.append(
+ ExprNodes.LocalsExprNode(
+ node.pos, self.current_scope_node(), lenv))
+ return node
+
+ def _inject_locals(self, node, func_name):
+ # locals()/dir()/vars() builtins
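+        # Illustrative rewrites (assuming the plain builtins are called):
+        #     locals() in a function  -> LocalsExprNode for that scope
+        #     dir() at module scope   -> SortedDictKeysNode(GlobalsExprNode)
+        #     vars() without argument -> LocalsExprNode (like locals())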
+ lenv = self.current_env()
+ entry = lenv.lookup_here(func_name)
+ if entry:
+ # not the builtin
+ return node
+ pos = node.pos
+ if func_name in ('locals', 'vars'):
+ if func_name == 'locals' and len(node.args) > 0:
+ error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
+ % len(node.args))
+ return node
+ elif func_name == 'vars':
+ if len(node.args) > 1:
+ error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
+ % len(node.args))
+ if len(node.args) > 0:
+ return node # nothing to do
+ return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
+ else: # dir()
+ if len(node.args) > 1:
+ error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
+ % len(node.args))
+ if len(node.args) > 0:
+ # optimised in Builtin.py
+ return node
+ if lenv.is_py_class_scope or lenv.is_module_scope:
+ if lenv.is_py_class_scope:
+ pyclass = self.current_scope_node()
+ locals_dict = ExprNodes.CloneNode(pyclass.dict)
+ else:
+ locals_dict = ExprNodes.GlobalsExprNode(pos)
+ return ExprNodes.SortedDictKeysNode(locals_dict)
+ local_names = sorted(var.name for var in lenv.entries.values() if var.name)
+ items = [ExprNodes.IdentifierStringNode(pos, value=var)
+ for var in local_names]
+ return ExprNodes.ListNode(pos, args=items)
+
+ def visit_PrimaryCmpNode(self, node):
+ # special case: for in/not-in test, we do not need to sort locals()
+ self.visitchildren(node)
+        if node.operator in ('in', 'not_in'):
+ if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
+ arg = node.operand2.arg
+ if isinstance(arg, ExprNodes.NoneCheckNode):
+ arg = arg.arg
+ node.operand2 = arg
+ return node
+
+ def visit_CascadedCmpNode(self, node):
+ return self.visit_PrimaryCmpNode(node)
+
+ def _inject_eval(self, node, func_name):
+ lenv = self.current_env()
+ entry = lenv.lookup_here(func_name)
+ if entry or len(node.args) != 1:
+ return node
+ # Inject globals and locals
+ node.args.append(ExprNodes.GlobalsExprNode(node.pos))
+ if not lenv.is_module_scope:
+ node.args.append(
+ ExprNodes.LocalsExprNode(
+ node.pos, self.current_scope_node(), lenv))
+ return node
+
+ def _inject_super(self, node, func_name):
+ lenv = self.current_env()
+ entry = lenv.lookup_here(func_name)
+ if entry or node.args:
+ return node
+ # Inject no-args super
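+        # Illustrative rewrite (a sketch): in "def meth(self): super().m()"
+        # inside a class body, super() becomes super(<class cell>, self).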
+ def_node = self.current_scope_node()
+ if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
+ len(self.env_stack) < 2):
+ return node
+ class_node, class_scope = self.env_stack[-2]
+ if class_scope.is_py_class_scope:
+ def_node.requires_classobj = True
+ class_node.class_cell.is_active = True
+ node.args = [
+ ExprNodes.ClassCellNode(
+ node.pos, is_generator=def_node.is_generator),
+ ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
+ ]
+ elif class_scope.is_c_class_scope:
+ node.args = [
+ ExprNodes.NameNode(
+ node.pos, name=class_node.scope.name,
+ entry=class_node.entry),
+ ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
+ ]
+ return node
+
+ def visit_SimpleCallNode(self, node):
+ # cython.foo
+ function = node.function.as_cython_attribute()
+ if function:
+ if function in InterpretCompilerDirectives.unop_method_nodes:
+ if len(node.args) != 1:
+ error(node.function.pos, u"%s() takes exactly one argument" % function)
+ else:
+ node = InterpretCompilerDirectives.unop_method_nodes[function](
+ node.function.pos, operand=node.args[0])
+ elif function in InterpretCompilerDirectives.binop_method_nodes:
+ if len(node.args) != 2:
+ error(node.function.pos, u"%s() takes exactly two arguments" % function)
+ else:
+ node = InterpretCompilerDirectives.binop_method_nodes[function](
+ node.function.pos, operand1=node.args[0], operand2=node.args[1])
+ elif function == u'cast':
+ if len(node.args) != 2:
+ error(node.function.pos,
+ u"cast() takes exactly two arguments and an optional typecheck keyword")
+ else:
+ type = node.args[0].analyse_as_type(self.current_env())
+ if type:
+ node = ExprNodes.TypecastNode(
+ node.function.pos, type=type, operand=node.args[1], typecheck=False)
+ else:
+ error(node.args[0].pos, "Not a type")
+ elif function == u'sizeof':
+ if len(node.args) != 1:
+ error(node.function.pos, u"sizeof() takes exactly one argument")
+ else:
+ type = node.args[0].analyse_as_type(self.current_env())
+ if type:
+ node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
+ else:
+ node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
+ elif function == 'cmod':
+ if len(node.args) != 2:
+ error(node.function.pos, u"cmod() takes exactly two arguments")
+ else:
+ node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
+ node.cdivision = True
+ elif function == 'cdiv':
+ if len(node.args) != 2:
+ error(node.function.pos, u"cdiv() takes exactly two arguments")
+ else:
+ node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
+ node.cdivision = True
+ elif function == u'set':
+ node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
+ elif function == u'staticmethod':
+ node.function = ExprNodes.NameNode(node.pos, name=EncodedString('staticmethod'))
+ elif self.context.cython_scope.lookup_qualified_name(function):
+ pass
+ else:
+ error(node.function.pos,
+ u"'%s' not a valid cython language construct" % function)
+
+ self.visitchildren(node)
+
+ if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
+ func_name = node.function.name
+ if func_name in ('dir', 'locals', 'vars'):
+ return self._inject_locals(node, func_name)
+ if func_name == 'eval':
+ return self._inject_eval(node, func_name)
+ if func_name == 'super':
+ return self._inject_super(node, func_name)
+ return node
+
+ def visit_GeneralCallNode(self, node):
+ function = node.function.as_cython_attribute()
+ if function == u'cast':
+ # NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args
+ args = node.positional_args.args
+ kwargs = node.keyword_args.compile_time_value(None)
+ if (len(args) != 2 or len(kwargs) > 1 or
+ (len(kwargs) == 1 and 'typecheck' not in kwargs)):
+ error(node.function.pos,
+ u"cast() takes exactly two arguments and an optional typecheck keyword")
+ else:
+ type = args[0].analyse_as_type(self.current_env())
+ if type:
+ typecheck = kwargs.get('typecheck', False)
+ node = ExprNodes.TypecastNode(
+ node.function.pos, type=type, operand=args[1], typecheck=typecheck)
+ else:
+ error(args[0].pos, "Not a type")
+
+ self.visitchildren(node)
+ return node
+
+
+class ReplaceFusedTypeChecks(VisitorTransform):
+ """
+ This is not a transform in the pipeline. It is invoked on the specific
+ versions of a cdef function with fused argument types. It filters out any
+ type branches that don't match. e.g.
+
+ if fused_t is mytype:
+ ...
+ elif fused_t in other_fused_type:
+ ...
+ """
+ def __init__(self, local_scope):
+ super(ReplaceFusedTypeChecks, self).__init__()
+ self.local_scope = local_scope
+ # defer the import until now to avoid circular import time dependencies
+ from .Optimize import ConstantFolding
+ self.transform = ConstantFolding(reevaluate=True)
+
+ def visit_IfStatNode(self, node):
+ """
+ Filters out any if clauses with false compile time type check
+ expression.
+ """
+ self.visitchildren(node)
+ return self.transform(node)
+
+ def visit_PrimaryCmpNode(self, node):
+ with Errors.local_errors(ignore=True):
+ type1 = node.operand1.analyse_as_type(self.local_scope)
+ type2 = node.operand2.analyse_as_type(self.local_scope)
+
+ if type1 and type2:
+ false_node = ExprNodes.BoolNode(node.pos, value=False)
+ true_node = ExprNodes.BoolNode(node.pos, value=True)
+
+ type1 = self.specialize_type(type1, node.operand1.pos)
+ op = node.operator
+
+ if op in ('is', 'is_not', '==', '!='):
+ type2 = self.specialize_type(type2, node.operand2.pos)
+
+ is_same = type1.same_as(type2)
+ eq = op in ('is', '==')
+
+ if (is_same and eq) or (not is_same and not eq):
+ return true_node
+
+ elif op in ('in', 'not_in'):
+ # We have to do an instance check directly, as operand2
+ # needs to be a fused type and not a type with a subtype
+ # that is fused. First unpack the typedef
+ if isinstance(type2, PyrexTypes.CTypedefType):
+ type2 = type2.typedef_base_type
+
+ if type1.is_fused:
+ error(node.operand1.pos, "Type is fused")
+ elif not type2.is_fused:
+ error(node.operand2.pos,
+ "Can only use 'in' or 'not in' on a fused type")
+ else:
+ types = PyrexTypes.get_specialized_types(type2)
+
+ for specialized_type in types:
+ if type1.same_as(specialized_type):
+ if op == 'in':
+ return true_node
+ else:
+ return false_node
+
+ if op == 'not_in':
+ return true_node
+
+ return false_node
+
+ return node
+
+ def specialize_type(self, type, pos):
+ try:
+ return type.specialize(self.local_scope.fused_to_specific)
+ except KeyError:
+ error(pos, "Type is not specific")
+ return type
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ return node
+
+
+class DebugTransform(CythonTransform):
+ """
+ Write debug information for this Cython module.
+ """
+
+ def __init__(self, context, options, result):
+ super(DebugTransform, self).__init__(context)
+ self.visited = set()
+ # our treebuilder and debug output writer
+ # (see Cython.Debugger.debug_output.CythonDebugWriter)
+ self.tb = self.context.gdb_debug_outputwriter
+ #self.c_output_file = options.output_file
+ self.c_output_file = result.c_file
+
+ # Closure support, basically treat nested functions as if the AST were
+ # never nested
+ self.nested_funcdefs = []
+
+ # tells visit_NameNode whether it should register step-into functions
+ self.register_stepinto = False
+
+ def visit_ModuleNode(self, node):
+ self.tb.module_name = node.full_module_name
+ attrs = dict(
+ module_name=node.full_module_name,
+ filename=node.pos[0].filename,
+ c_filename=self.c_output_file)
+
+ self.tb.start('Module', attrs)
+
+ # serialize functions
+ self.tb.start('Functions')
+ # First, serialize functions normally...
+ self.visitchildren(node)
+
+ # ... then, serialize nested functions
+ for nested_funcdef in self.nested_funcdefs:
+ self.visit_FuncDefNode(nested_funcdef)
+
+ self.register_stepinto = True
+ self.serialize_modulenode_as_function(node)
+ self.register_stepinto = False
+ self.tb.end('Functions')
+
+ # 2.3 compatibility. Serialize global variables
+ self.tb.start('Globals')
+ entries = {}
+
+ for k, v in node.scope.entries.items():
+ if (v.qualified_name not in self.visited and not
+ v.name.startswith('__pyx_') and not
+ v.type.is_cfunction and not
+ v.type.is_extension_type):
+                entries[k] = v
+
+ self.serialize_local_variables(entries)
+ self.tb.end('Globals')
+ # self.tb.end('Module') # end Module after the line number mapping in
+ # Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
+ return node
+
+ def visit_FuncDefNode(self, node):
+ self.visited.add(node.local_scope.qualified_name)
+
+ if getattr(node, 'is_wrapper', False):
+ return node
+
+ if self.register_stepinto:
+ self.nested_funcdefs.append(node)
+ return node
+
+ # node.entry.visibility = 'extern'
+ if node.py_func is None:
+ pf_cname = ''
+ else:
+ pf_cname = node.py_func.entry.func_cname
+
+ attrs = dict(
+ name=node.entry.name or getattr(node, 'name', '<unknown>'),
+ cname=node.entry.func_cname,
+ pf_cname=pf_cname,
+ qualified_name=node.local_scope.qualified_name,
+ lineno=str(node.pos[1]))
+
+ self.tb.start('Function', attrs=attrs)
+
+ self.tb.start('Locals')
+ self.serialize_local_variables(node.local_scope.entries)
+ self.tb.end('Locals')
+
+ self.tb.start('Arguments')
+ for arg in node.local_scope.arg_entries:
+ self.tb.start(arg.name)
+ self.tb.end(arg.name)
+ self.tb.end('Arguments')
+
+ self.tb.start('StepIntoFunctions')
+ self.register_stepinto = True
+ self.visitchildren(node)
+ self.register_stepinto = False
+ self.tb.end('StepIntoFunctions')
+ self.tb.end('Function')
+
+ return node
+
+ def visit_NameNode(self, node):
+ if (self.register_stepinto and
+ node.type is not None and
+ node.type.is_cfunction and
+ getattr(node, 'is_called', False) and
+ node.entry.func_cname is not None):
+ # don't check node.entry.in_cinclude, as 'cdef extern: ...'
+ # declared functions are not 'in_cinclude'.
+ # This means we will list called 'cdef' functions as
+ # "step into functions", but this is not an issue as they will be
+ # recognized as Cython functions anyway.
+ attrs = dict(name=node.entry.func_cname)
+ self.tb.start('StepIntoFunction', attrs=attrs)
+ self.tb.end('StepIntoFunction')
+
+ self.visitchildren(node)
+ return node
+
+ def serialize_modulenode_as_function(self, node):
+ """
+        Serialize the module-level code as a function so the debugger knows
+        it is a "relevant frame" and knows where to set the breakpoint
+        for 'break modulename'.
+ """
+ name = node.full_module_name.rpartition('.')[-1]
+
+ cname_py2 = 'init' + name
+ cname_py3 = 'PyInit_' + name
+
+ py2_attrs = dict(
+ name=name,
+ cname=cname_py2,
+ pf_cname='',
+ # Ignore the qualified_name, breakpoints should be set using
+ # `cy break modulename:lineno` for module-level breakpoints.
+ qualified_name='',
+ lineno='1',
+ is_initmodule_function="True",
+ )
+
+ py3_attrs = dict(py2_attrs, cname=cname_py3)
+
+ self._serialize_modulenode_as_function(node, py2_attrs)
+ self._serialize_modulenode_as_function(node, py3_attrs)
+
+ def _serialize_modulenode_as_function(self, node, attrs):
+ self.tb.start('Function', attrs=attrs)
+
+ self.tb.start('Locals')
+ self.serialize_local_variables(node.scope.entries)
+ self.tb.end('Locals')
+
+ self.tb.start('Arguments')
+ self.tb.end('Arguments')
+
+ self.tb.start('StepIntoFunctions')
+ self.register_stepinto = True
+ self.visitchildren(node)
+ self.register_stepinto = False
+ self.tb.end('StepIntoFunctions')
+
+ self.tb.end('Function')
+
+ def serialize_local_variables(self, entries):
+ for entry in entries.values():
+ if not entry.cname:
+ # not a local variable
+ continue
+ if entry.type.is_pyobject:
+ vartype = 'PythonObject'
+ else:
+ vartype = 'CObject'
+
+ if entry.from_closure:
+ # We're dealing with a closure where a variable from an outer
+ # scope is accessed, get it from the scope object.
+ cname = '%s->%s' % (Naming.cur_scope_cname,
+ entry.outer_entry.cname)
+
+ qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
+ entry.scope.name,
+ entry.name)
+ elif entry.in_closure:
+ cname = '%s->%s' % (Naming.cur_scope_cname,
+ entry.cname)
+ qname = entry.qualified_name
+ else:
+ cname = entry.cname
+ qname = entry.qualified_name
+
+ if not entry.pos:
+ # this happens for variables that are not in the user's code,
+ # e.g. for the global __builtins__, __doc__, etc. We can just
+ # set the lineno to 0 for those.
+ lineno = '0'
+ else:
+ lineno = str(entry.pos[1])
+
+ attrs = dict(
+ name=entry.name,
+ cname=cname,
+ qualified_name=qname,
+ type=vartype,
+ lineno=lineno)
+
+ self.tb.start('LocalVar', attrs)
+ self.tb.end('LocalVar')
diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.pxd b/contrib/tools/cython/Cython/Compiler/Parsing.pxd
new file mode 100644
index 0000000000..25453b39ab
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Parsing.pxd
@@ -0,0 +1,199 @@
+# We declare all of these here so that the first argument is statically
+# typed as PyrexScanner.
+
+from __future__ import absolute_import
+
+cimport cython
+from .Scanning cimport PyrexScanner
+
+ctypedef object (*p_sub_expr_func)(PyrexScanner obj)
+
+# entry points
+
+cpdef p_module(PyrexScanner s, pxd, full_module_name, ctx=*)
+cpdef p_code(PyrexScanner s, level= *, ctx=*)
+
+# internal parser states
+
+cdef p_ident(PyrexScanner s, message =*)
+cdef p_ident_list(PyrexScanner s)
+
+cdef tuple p_binop_operator(PyrexScanner s)
+cdef p_binop_expr(PyrexScanner s, ops, p_sub_expr_func p_sub_expr)
+cdef p_lambdef(PyrexScanner s, bint allow_conditional=*)
+cdef p_lambdef_nocond(PyrexScanner s)
+cdef p_test(PyrexScanner s)
+cdef p_test_nocond(PyrexScanner s)
+cdef p_or_test(PyrexScanner s)
+cdef p_rassoc_binop_expr(PyrexScanner s, ops, p_sub_expr_func p_subexpr)
+cdef p_and_test(PyrexScanner s)
+cdef p_not_test(PyrexScanner s)
+cdef p_comparison(PyrexScanner s)
+cdef p_test_or_starred_expr(PyrexScanner s)
+cdef p_starred_expr(PyrexScanner s)
+cdef p_cascaded_cmp(PyrexScanner s)
+cdef p_cmp_op(PyrexScanner s)
+cdef p_bit_expr(PyrexScanner s)
+cdef p_xor_expr(PyrexScanner s)
+cdef p_and_expr(PyrexScanner s)
+cdef p_shift_expr(PyrexScanner s)
+cdef p_arith_expr(PyrexScanner s)
+cdef p_term(PyrexScanner s)
+cdef p_factor(PyrexScanner s)
+cdef _p_factor(PyrexScanner s)
+cdef p_typecast(PyrexScanner s)
+cdef p_sizeof(PyrexScanner s)
+cdef p_yield_expression(PyrexScanner s)
+cdef p_yield_statement(PyrexScanner s)
+cdef p_async_statement(PyrexScanner s, ctx, decorators)
+cdef p_power(PyrexScanner s)
+cdef p_new_expr(PyrexScanner s)
+cdef p_trailer(PyrexScanner s, node1)
+cdef p_call_parse_args(PyrexScanner s, bint allow_genexp = *)
+cdef p_call_build_packed_args(pos, positional_args, keyword_args)
+cdef p_call(PyrexScanner s, function)
+cdef p_index(PyrexScanner s, base)
+cdef tuple p_subscript_list(PyrexScanner s)
+cdef p_subscript(PyrexScanner s)
+cdef p_slice_element(PyrexScanner s, follow_set)
+cdef expect_ellipsis(PyrexScanner s)
+cdef make_slice_nodes(pos, subscripts)
+cpdef make_slice_node(pos, start, stop = *, step = *)
+cdef p_atom(PyrexScanner s)
+@cython.locals(value=unicode)
+cdef p_int_literal(PyrexScanner s)
+cdef p_name(PyrexScanner s, name)
+cdef wrap_compile_time_constant(pos, value)
+cdef p_cat_string_literal(PyrexScanner s)
+cdef p_opt_string_literal(PyrexScanner s, required_type=*)
+cdef bint check_for_non_ascii_characters(unicode string)
+@cython.locals(systr=unicode, is_python3_source=bint, is_raw=bint)
+cdef p_string_literal(PyrexScanner s, kind_override=*)
+cdef _append_escape_sequence(kind, builder, unicode escape_sequence, PyrexScanner s)
+cdef tuple _f_string_error_pos(pos, string, Py_ssize_t i)
+@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, next_start=Py_ssize_t)
+cdef list p_f_string(PyrexScanner s, unicode_value, pos, bint is_raw)
+@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, quote_char=Py_UCS4, NO_CHAR=Py_UCS4)
+cdef tuple p_f_string_expr(PyrexScanner s, unicode_value, pos, Py_ssize_t starting_index, bint is_raw)
+cdef p_list_maker(PyrexScanner s)
+cdef p_comp_iter(PyrexScanner s, body)
+cdef p_comp_for(PyrexScanner s, body)
+cdef p_comp_if(PyrexScanner s, body)
+cdef p_dict_or_set_maker(PyrexScanner s)
+cdef p_backquote_expr(PyrexScanner s)
+cdef p_simple_expr_list(PyrexScanner s, expr=*)
+cdef p_test_or_starred_expr_list(PyrexScanner s, expr=*)
+cdef p_testlist(PyrexScanner s)
+cdef p_testlist_star_expr(PyrexScanner s)
+cdef p_testlist_comp(PyrexScanner s)
+cdef p_genexp(PyrexScanner s, expr)
+
+#-------------------------------------------------------
+#
+# Statements
+#
+#-------------------------------------------------------
+
+cdef p_global_statement(PyrexScanner s)
+cdef p_nonlocal_statement(PyrexScanner s)
+cdef p_expression_or_assignment(PyrexScanner s)
+cdef p_print_statement(PyrexScanner s)
+cdef p_exec_statement(PyrexScanner s)
+cdef p_del_statement(PyrexScanner s)
+cdef p_pass_statement(PyrexScanner s, bint with_newline = *)
+cdef p_break_statement(PyrexScanner s)
+cdef p_continue_statement(PyrexScanner s)
+cdef p_return_statement(PyrexScanner s)
+cdef p_raise_statement(PyrexScanner s)
+cdef p_import_statement(PyrexScanner s)
+cdef p_from_import_statement(PyrexScanner s, bint first_statement = *)
+cdef p_imported_name(PyrexScanner s, bint is_cimport)
+cdef p_dotted_name(PyrexScanner s, bint as_allowed)
+cdef p_as_name(PyrexScanner s)
+cdef p_assert_statement(PyrexScanner s)
+cdef p_if_statement(PyrexScanner s)
+cdef p_if_clause(PyrexScanner s)
+cdef p_else_clause(PyrexScanner s)
+cdef p_while_statement(PyrexScanner s)
+cdef p_for_statement(PyrexScanner s, bint is_async=*)
+cdef dict p_for_bounds(PyrexScanner s, bint allow_testlist=*, bint is_async=*)
+cdef p_for_from_relation(PyrexScanner s)
+cdef p_for_from_step(PyrexScanner s)
+cdef p_target(PyrexScanner s, terminator)
+cdef p_for_target(PyrexScanner s)
+cdef p_for_iterator(PyrexScanner s, bint allow_testlist=*, bint is_async=*)
+cdef p_try_statement(PyrexScanner s)
+cdef p_except_clause(PyrexScanner s)
+cdef p_include_statement(PyrexScanner s, ctx)
+cdef p_with_statement(PyrexScanner s)
+cdef p_with_items(PyrexScanner s, bint is_async=*)
+cdef p_with_template(PyrexScanner s)
+cdef p_simple_statement(PyrexScanner s, bint first_statement = *)
+cdef p_simple_statement_list(PyrexScanner s, ctx, bint first_statement = *)
+cdef p_compile_time_expr(PyrexScanner s)
+cdef p_DEF_statement(PyrexScanner s)
+cdef p_IF_statement(PyrexScanner s, ctx)
+cdef p_statement(PyrexScanner s, ctx, bint first_statement = *)
+cdef p_statement_list(PyrexScanner s, ctx, bint first_statement = *)
+cdef p_suite(PyrexScanner s, ctx = *)
+cdef tuple p_suite_with_docstring(PyrexScanner s, ctx, bint with_doc_only=*)
+cdef tuple _extract_docstring(node)
+cdef p_positional_and_keyword_args(PyrexScanner s, end_sy_set, templates = *)
+
+cpdef p_c_base_type(PyrexScanner s, bint self_flag = *, bint nonempty = *, templates = *)
+cdef p_calling_convention(PyrexScanner s)
+cdef p_c_complex_base_type(PyrexScanner s, templates = *)
+cdef p_c_simple_base_type(PyrexScanner s, bint self_flag, bint nonempty, templates = *)
+cdef p_buffer_or_template(PyrexScanner s, base_type_node, templates)
+cdef p_bracketed_base_type(PyrexScanner s, base_type_node, nonempty, empty)
+cdef is_memoryviewslice_access(PyrexScanner s)
+cdef p_memoryviewslice_access(PyrexScanner s, base_type_node)
+cdef bint looking_at_name(PyrexScanner s) except -2
+cdef object looking_at_expr(PyrexScanner s)# except -2
+cdef bint looking_at_base_type(PyrexScanner s) except -2
+cdef bint looking_at_dotted_name(PyrexScanner s) except -2
+cdef bint looking_at_call(PyrexScanner s) except -2
+cdef p_sign_and_longness(PyrexScanner s)
+cdef p_opt_cname(PyrexScanner s)
+cpdef p_c_declarator(PyrexScanner s, ctx = *, bint empty = *, bint is_type = *, bint cmethod_flag = *,
+ bint assignable = *, bint nonempty = *,
+ bint calling_convention_allowed = *)
+cdef p_c_array_declarator(PyrexScanner s, base)
+cdef p_c_func_declarator(PyrexScanner s, pos, ctx, base, bint cmethod_flag)
+cdef p_c_simple_declarator(PyrexScanner s, ctx, bint empty, bint is_type, bint cmethod_flag,
+ bint assignable, bint nonempty)
+cdef p_nogil(PyrexScanner s)
+cdef p_with_gil(PyrexScanner s)
+cdef p_exception_value_clause(PyrexScanner s)
+cpdef p_c_arg_list(PyrexScanner s, ctx = *, bint in_pyfunc = *, bint cmethod_flag = *,
+ bint nonempty_declarators = *, bint kw_only = *, bint annotated = *)
+cdef p_optional_ellipsis(PyrexScanner s)
+cdef p_c_arg_decl(PyrexScanner s, ctx, in_pyfunc, bint cmethod_flag = *, bint nonempty = *, bint kw_only = *, bint annotated = *)
+cdef p_api(PyrexScanner s)
+cdef p_cdef_statement(PyrexScanner s, ctx)
+cdef p_cdef_block(PyrexScanner s, ctx)
+cdef p_cdef_extern_block(PyrexScanner s, pos, ctx)
+cdef p_c_enum_definition(PyrexScanner s, pos, ctx)
+cdef p_c_enum_line(PyrexScanner s, ctx, list items)
+cdef p_c_enum_item(PyrexScanner s, ctx, list items)
+cdef p_c_struct_or_union_definition(PyrexScanner s, pos, ctx)
+cdef p_fused_definition(PyrexScanner s, pos, ctx)
+cdef p_struct_enum(PyrexScanner s, pos, ctx)
+cdef p_visibility(PyrexScanner s, prev_visibility)
+cdef p_c_modifiers(PyrexScanner s)
+cdef p_c_func_or_var_declaration(PyrexScanner s, pos, ctx)
+cdef p_ctypedef_statement(PyrexScanner s, ctx)
+cdef p_decorators(PyrexScanner s)
+cdef _reject_cdef_modifier_in_py(PyrexScanner s, name)
+cdef p_def_statement(PyrexScanner s, list decorators=*, bint is_async_def=*)
+cdef p_varargslist(PyrexScanner s, terminator=*, bint annotated = *)
+cdef p_py_arg_decl(PyrexScanner s, bint annotated = *)
+cdef p_class_statement(PyrexScanner s, decorators)
+cdef p_c_class_definition(PyrexScanner s, pos, ctx)
+cdef tuple p_c_class_options(PyrexScanner s)
+cdef p_property_decl(PyrexScanner s)
+cdef p_doc_string(PyrexScanner s)
+cdef p_ignorable_statement(PyrexScanner s)
+cdef dict p_compiler_directive_comments(PyrexScanner s)
+cdef p_template_definition(PyrexScanner s)
+cdef p_cpp_class_definition(PyrexScanner s, pos, ctx)
+cdef p_cpp_class_attribute(PyrexScanner s, ctx)
diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.py b/contrib/tools/cython/Cython/Compiler/Parsing.py
new file mode 100644
index 0000000000..20dbc9bbf9
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Parsing.py
@@ -0,0 +1,3860 @@
+# cython: auto_cpdef=True, infer_types=True, language_level=3, py2_import=True
+#
+# Parser
+#
+
+from __future__ import absolute_import
+
+# This should be done automatically
+import cython
+cython.declare(Nodes=object, ExprNodes=object, EncodedString=object,
+ bytes_literal=object, StringEncoding=object,
+ FileSourceDescriptor=object, lookup_unicodechar=object, unicode_category=object,
+ Future=object, Options=object, error=object, warning=object,
+ Builtin=object, ModuleNode=object, Utils=object, _unicode=object, _bytes=object,
+ re=object, sys=object, _parse_escape_sequences=object, _parse_escape_sequences_raw=object,
+ partial=object, reduce=object, _IS_PY3=cython.bint, _IS_2BYTE_UNICODE=cython.bint,
+ _CDEF_MODIFIERS=tuple)
+
+from io import StringIO
+import re
+import sys
+from unicodedata import lookup as lookup_unicodechar, category as unicode_category
+from functools import partial, reduce
+
+from .Scanning import PyrexScanner, FileSourceDescriptor, StringSourceDescriptor
+from . import Nodes
+from . import ExprNodes
+from . import Builtin
+from . import StringEncoding
+from .StringEncoding import EncodedString, bytes_literal, _unicode, _bytes
+from .ModuleNode import ModuleNode
+from .Errors import error, warning
+from .. import Utils
+from . import Future
+from . import Options
+
+_IS_PY3 = sys.version_info[0] >= 3
+_IS_2BYTE_UNICODE = sys.maxunicode == 0xffff
+_CDEF_MODIFIERS = ('inline', 'nogil', 'api')
+
+
+class Ctx(object):
+ # Parsing context
+ level = 'other'
+ visibility = 'private'
+ cdef_flag = 0
+ typedef_flag = 0
+ api = 0
+ overridable = 0
+ nogil = 0
+ namespace = None
+ templates = None
+ allow_struct_enum_decorator = False
+
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+
+ def __call__(self, **kwds):
+ ctx = Ctx()
+ d = ctx.__dict__
+ d.update(self.__dict__)
+ d.update(kwds)
+ return ctx
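+
+# Illustrative usage of Ctx (a sketch, not part of the parser API):
+#     ctx = Ctx(level='module')
+#     inner = ctx(cdef_flag=1)   # copy of ctx with one attribute overridden
+#     # ctx.cdef_flag is still 0, inner.cdef_flag is 1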
+
+
+def p_ident(s, message="Expected an identifier"):
+ if s.sy == 'IDENT':
+ name = s.systring
+ s.next()
+ return name
+ else:
+ s.error(message)
+
+def p_ident_list(s):
+ names = []
+ while s.sy == 'IDENT':
+ names.append(s.systring)
+ s.next()
+ if s.sy != ',':
+ break
+ s.next()
+ return names
+
+#------------------------------------------
+#
+# Expressions
+#
+#------------------------------------------
+
+def p_binop_operator(s):
+ pos = s.position()
+ op = s.sy
+ s.next()
+ return op, pos
+
+def p_binop_expr(s, ops, p_sub_expr):
+ n1 = p_sub_expr(s)
+ while s.sy in ops:
+ op, pos = p_binop_operator(s)
+ n2 = p_sub_expr(s)
+ n1 = ExprNodes.binop_node(pos, op, n1, n2)
+ if op == '/':
+ if Future.division in s.context.future_directives:
+ n1.truedivision = True
+ else:
+ n1.truedivision = None # unknown
+ return n1
+
+#lambdef: 'lambda' [varargslist] ':' test
+
+def p_lambdef(s, allow_conditional=True):
+ # s.sy == 'lambda'
+ pos = s.position()
+ s.next()
+ if s.sy == ':':
+ args = []
+ star_arg = starstar_arg = None
+ else:
+ args, star_arg, starstar_arg = p_varargslist(
+ s, terminator=':', annotated=False)
+ s.expect(':')
+ if allow_conditional:
+ expr = p_test(s)
+ else:
+ expr = p_test_nocond(s)
+ return ExprNodes.LambdaNode(
+ pos, args = args,
+ star_arg = star_arg, starstar_arg = starstar_arg,
+ result_expr = expr)
+
+#lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+
+def p_lambdef_nocond(s):
+ return p_lambdef(s, allow_conditional=False)
+
+#test: or_test ['if' or_test 'else' test] | lambdef
+
+def p_test(s):
+ if s.sy == 'lambda':
+ return p_lambdef(s)
+ pos = s.position()
+ expr = p_or_test(s)
+ if s.sy == 'if':
+ s.next()
+ test = p_or_test(s)
+ s.expect('else')
+ other = p_test(s)
+ return ExprNodes.CondExprNode(pos, test=test, true_val=expr, false_val=other)
+ else:
+ return expr
+
+#test_nocond: or_test | lambdef_nocond
+
+def p_test_nocond(s):
+ if s.sy == 'lambda':
+ return p_lambdef_nocond(s)
+ else:
+ return p_or_test(s)
+
+#or_test: and_test ('or' and_test)*
+
+def p_or_test(s):
+ return p_rassoc_binop_expr(s, ('or',), p_and_test)
+
+def p_rassoc_binop_expr(s, ops, p_subexpr):
+ n1 = p_subexpr(s)
+ if s.sy in ops:
+ pos = s.position()
+ op = s.sy
+ s.next()
+ n2 = p_rassoc_binop_expr(s, ops, p_subexpr)
+ n1 = ExprNodes.binop_node(pos, op, n1, n2)
+ return n1
+
+#and_test: not_test ('and' not_test)*
+
+def p_and_test(s):
+ #return p_binop_expr(s, ('and',), p_not_test)
+ return p_rassoc_binop_expr(s, ('and',), p_not_test)
+
+#not_test: 'not' not_test | comparison
+
+def p_not_test(s):
+ if s.sy == 'not':
+ pos = s.position()
+ s.next()
+ return ExprNodes.NotNode(pos, operand = p_not_test(s))
+ else:
+ return p_comparison(s)
+
+#comparison: expr (comp_op expr)*
+#comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+
+def p_comparison(s):
+ n1 = p_starred_expr(s)
+ if s.sy in comparison_ops:
+ pos = s.position()
+ op = p_cmp_op(s)
+ n2 = p_starred_expr(s)
+ n1 = ExprNodes.PrimaryCmpNode(pos,
+ operator = op, operand1 = n1, operand2 = n2)
+ if s.sy in comparison_ops:
+ n1.cascade = p_cascaded_cmp(s)
+ return n1
+
+def p_test_or_starred_expr(s):
+ if s.sy == '*':
+ return p_starred_expr(s)
+ else:
+ return p_test(s)
+
+def p_starred_expr(s):
+ pos = s.position()
+ if s.sy == '*':
+ starred = True
+ s.next()
+ else:
+ starred = False
+ expr = p_bit_expr(s)
+ if starred:
+ expr = ExprNodes.StarredUnpackingNode(pos, expr)
+ return expr
+
+def p_cascaded_cmp(s):
+ pos = s.position()
+ op = p_cmp_op(s)
+ n2 = p_starred_expr(s)
+ result = ExprNodes.CascadedCmpNode(pos,
+ operator = op, operand2 = n2)
+ if s.sy in comparison_ops:
+ result.cascade = p_cascaded_cmp(s)
+ return result
+
+def p_cmp_op(s):
+ if s.sy == 'not':
+ s.next()
+ s.expect('in')
+ op = 'not_in'
+ elif s.sy == 'is':
+ s.next()
+ if s.sy == 'not':
+ s.next()
+ op = 'is_not'
+ else:
+ op = 'is'
+ else:
+ op = s.sy
+ s.next()
+ if op == '<>':
+ op = '!='
+ return op
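+
+# Illustrative normalisations performed by p_cmp_op (a sketch):
+#     'not' 'in'  ->  'not_in'
+#     'is' 'not'  ->  'is_not'
+#     '<>'        ->  '!='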
+
+comparison_ops = cython.declare(set, set([
+ '<', '>', '==', '>=', '<=', '<>', '!=',
+ 'in', 'is', 'not'
+]))
+
+#expr: xor_expr ('|' xor_expr)*
+
+def p_bit_expr(s):
+ return p_binop_expr(s, ('|',), p_xor_expr)
+
+#xor_expr: and_expr ('^' and_expr)*
+
+def p_xor_expr(s):
+ return p_binop_expr(s, ('^',), p_and_expr)
+
+#and_expr: shift_expr ('&' shift_expr)*
+
+def p_and_expr(s):
+ return p_binop_expr(s, ('&',), p_shift_expr)
+
+#shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+
+def p_shift_expr(s):
+ return p_binop_expr(s, ('<<', '>>'), p_arith_expr)
+
+#arith_expr: term (('+'|'-') term)*
+
+def p_arith_expr(s):
+ return p_binop_expr(s, ('+', '-'), p_term)
+
+#term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+
+def p_term(s):
+ return p_binop_expr(s, ('*', '@', '/', '%', '//'), p_factor)
+
+#factor: ('+'|'-'|'~'|'&'|typecast|sizeof) factor | power
+
+def p_factor(s):
+ # little indirection for C-ification purposes
+ return _p_factor(s)
+
+def _p_factor(s):
+ sy = s.sy
+ if sy in ('+', '-', '~'):
+ op = s.sy
+ pos = s.position()
+ s.next()
+ return ExprNodes.unop_node(pos, op, p_factor(s))
+ elif not s.in_python_file:
+ if sy == '&':
+ pos = s.position()
+ s.next()
+ arg = p_factor(s)
+ return ExprNodes.AmpersandNode(pos, operand = arg)
+ elif sy == "<":
+ return p_typecast(s)
+ elif sy == 'IDENT' and s.systring == "sizeof":
+ return p_sizeof(s)
+ return p_power(s)
+
+def p_typecast(s):
+ # s.sy == "<"
+ pos = s.position()
+ s.next()
+ base_type = p_c_base_type(s)
+ is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
+ is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
+ is_const = isinstance(base_type, Nodes.CConstTypeNode)
+ if (not is_memslice and not is_template and not is_const
+ and base_type.name is None):
+ s.error("Unknown type")
+ declarator = p_c_declarator(s, empty = 1)
+ if s.sy == '?':
+ s.next()
+ typecheck = 1
+ else:
+ typecheck = 0
+ s.expect(">")
+ operand = p_factor(s)
+ if is_memslice:
+ return ExprNodes.CythonArrayNode(pos, base_type_node=base_type,
+ operand=operand)
+
+ return ExprNodes.TypecastNode(pos,
+ base_type = base_type,
+ declarator = declarator,
+ operand = operand,
+ typecheck = typecheck)
+
+def p_sizeof(s):
+ # s.sy == ident "sizeof"
+ pos = s.position()
+ s.next()
+ s.expect('(')
+    # Here we decide whether we are looking at an expression or a type.
+    # If it is actually a type that is also parsable as an expression,
+    # we treat it as an expression here.
+ if looking_at_expr(s):
+ operand = p_test(s)
+ node = ExprNodes.SizeofVarNode(pos, operand = operand)
+ else:
+ base_type = p_c_base_type(s)
+ declarator = p_c_declarator(s, empty = 1)
+ node = ExprNodes.SizeofTypeNode(pos,
+ base_type = base_type, declarator = declarator)
+ s.expect(')')
+ return node
+
+
+def p_yield_expression(s):
+ # s.sy == "yield"
+ pos = s.position()
+ s.next()
+ is_yield_from = False
+ if s.sy == 'from':
+ is_yield_from = True
+ s.next()
+ if s.sy != ')' and s.sy not in statement_terminators:
+ # "yield from" does not support implicit tuples, but "yield" does ("yield 1,2")
+ arg = p_test(s) if is_yield_from else p_testlist(s)
+ else:
+ if is_yield_from:
+ s.error("'yield from' requires a source argument",
+ pos=pos, fatal=False)
+ arg = None
+ if is_yield_from:
+ return ExprNodes.YieldFromExprNode(pos, arg=arg)
+ else:
+ return ExprNodes.YieldExprNode(pos, arg=arg)
+
+
+def p_yield_statement(s):
+ # s.sy == "yield"
+ yield_expr = p_yield_expression(s)
+ return Nodes.ExprStatNode(yield_expr.pos, expr=yield_expr)
+
+
+def p_async_statement(s, ctx, decorators):
+ # s.sy >> 'async' ...
+ if s.sy == 'def':
+ # 'async def' statements aren't allowed in pxd files
+ if 'pxd' in ctx.level:
+ s.error('def statement not allowed here')
+ s.level = ctx.level
+ return p_def_statement(s, decorators, is_async_def=True)
+ elif decorators:
+ s.error("Decorators can only be followed by functions or classes")
+ elif s.sy == 'for':
+ return p_for_statement(s, is_async=True)
+ elif s.sy == 'with':
+ s.next()
+ return p_with_items(s, is_async=True)
+ else:
+ s.error("expected one of 'def', 'for', 'with' after 'async'")
+
+
+#power: atom_expr ('**' factor)*
+#atom_expr: ['await'] atom trailer*
+
+def p_power(s):
+ if s.systring == 'new' and s.peek()[0] == 'IDENT':
+ return p_new_expr(s)
+ await_pos = None
+ if s.sy == 'await':
+ await_pos = s.position()
+ s.next()
+ n1 = p_atom(s)
+ while s.sy in ('(', '[', '.'):
+ n1 = p_trailer(s, n1)
+ if await_pos:
+ n1 = ExprNodes.AwaitExprNode(await_pos, arg=n1)
+ if s.sy == '**':
+ pos = s.position()
+ s.next()
+ n2 = p_factor(s)
+ n1 = ExprNodes.binop_node(pos, '**', n1, n2)
+ return n1
+
+
+def p_new_expr(s):
+ # s.systring == 'new'.
+ pos = s.position()
+ s.next()
+ cppclass = p_c_base_type(s)
+ return p_call(s, ExprNodes.NewExprNode(pos, cppclass = cppclass))
+
+#trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+
+def p_trailer(s, node1):
+ pos = s.position()
+ if s.sy == '(':
+ return p_call(s, node1)
+ elif s.sy == '[':
+ return p_index(s, node1)
+ else: # s.sy == '.'
+ s.next()
+ name = p_ident(s)
+ return ExprNodes.AttributeNode(pos,
+ obj=node1, attribute=name)
+
+
+# arglist: argument (',' argument)* [',']
+# argument: [test '='] test # Really [keyword '='] test
+
+# since PEP 448:
+# argument: ( test [comp_for] |
+# test '=' test |
+# '**' expr |
+# star_expr )
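+#
+# Illustrative grouping (a sketch): for "f(a, *b, c, x=1, **d)" the parser
+# below collects positional_args == [[a], b, [c]] (lists of plain args, bare
+# nodes for *-unpacks) and keyword_args == [(x, 1), d].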
+
+def p_call_parse_args(s, allow_genexp=True):
+ # s.sy == '('
+ pos = s.position()
+ s.next()
+ positional_args = []
+ keyword_args = []
+ starstar_seen = False
+ last_was_tuple_unpack = False
+ while s.sy != ')':
+ if s.sy == '*':
+ if starstar_seen:
+ s.error("Non-keyword arg following keyword arg", pos=s.position())
+ s.next()
+ positional_args.append(p_test(s))
+ last_was_tuple_unpack = True
+ elif s.sy == '**':
+ s.next()
+ keyword_args.append(p_test(s))
+ starstar_seen = True
+ else:
+ arg = p_test(s)
+ if s.sy == '=':
+ s.next()
+ if not arg.is_name:
+ s.error("Expected an identifier before '='",
+ pos=arg.pos)
+ encoded_name = s.context.intern_ustring(arg.name)
+ keyword = ExprNodes.IdentifierStringNode(
+ arg.pos, value=encoded_name)
+ arg = p_test(s)
+ keyword_args.append((keyword, arg))
+ else:
+ if keyword_args:
+ s.error("Non-keyword arg following keyword arg", pos=arg.pos)
+ if positional_args and not last_was_tuple_unpack:
+ positional_args[-1].append(arg)
+ else:
+ positional_args.append([arg])
+ last_was_tuple_unpack = False
+ if s.sy != ',':
+ break
+ s.next()
+
+ if s.sy in ('for', 'async'):
+ if not keyword_args and not last_was_tuple_unpack:
+ if len(positional_args) == 1 and len(positional_args[0]) == 1:
+ positional_args = [[p_genexp(s, positional_args[0][0])]]
+ s.expect(')')
+ return positional_args or [[]], keyword_args
+
+
+def p_call_build_packed_args(pos, positional_args, keyword_args):
+ keyword_dict = None
+
+ subtuples = [
+ ExprNodes.TupleNode(pos, args=arg) if isinstance(arg, list) else ExprNodes.AsTupleNode(pos, arg=arg)
+ for arg in positional_args
+ ]
+ # TODO: implement a faster way to join tuples than creating each one and adding them
+ arg_tuple = reduce(partial(ExprNodes.binop_node, pos, '+'), subtuples)
+
+ if keyword_args:
+ kwargs = []
+ dict_items = []
+ for item in keyword_args:
+ if isinstance(item, tuple):
+ key, value = item
+ dict_items.append(ExprNodes.DictItemNode(pos=key.pos, key=key, value=value))
+ elif item.is_dict_literal:
+ # unpack "**{a:b}" directly
+ dict_items.extend(item.key_value_pairs)
+ else:
+ if dict_items:
+ kwargs.append(ExprNodes.DictNode(
+ dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True))
+ dict_items = []
+ kwargs.append(item)
+
+ if dict_items:
+ kwargs.append(ExprNodes.DictNode(
+ dict_items[0].pos, key_value_pairs=dict_items, reject_duplicates=True))
+
+ if kwargs:
+ if len(kwargs) == 1 and kwargs[0].is_dict_literal:
+ # only simple keyword arguments found -> one dict
+ keyword_dict = kwargs[0]
+ else:
+ # at least one **kwargs
+ keyword_dict = ExprNodes.MergedDictNode(pos, keyword_args=kwargs)
+
+ return arg_tuple, keyword_dict
+
+
+def p_call(s, function):
+ # s.sy == '('
+ pos = s.position()
+ positional_args, keyword_args = p_call_parse_args(s)
+
+ if not keyword_args and len(positional_args) == 1 and isinstance(positional_args[0], list):
+ return ExprNodes.SimpleCallNode(pos, function=function, args=positional_args[0])
+ else:
+ arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args)
+ return ExprNodes.GeneralCallNode(
+ pos, function=function, positional_args=arg_tuple, keyword_args=keyword_dict)
+
+
+#subscriptlist: subscript (',' subscript)* [',']
+
+def p_index(s, base):
+ # s.sy == '['
+ pos = s.position()
+ s.next()
+ subscripts, is_single_value = p_subscript_list(s)
+ if is_single_value and len(subscripts[0]) == 2:
+ start, stop = subscripts[0]
+ result = ExprNodes.SliceIndexNode(pos,
+ base = base, start = start, stop = stop)
+ else:
+ indexes = make_slice_nodes(pos, subscripts)
+ if is_single_value:
+ index = indexes[0]
+ else:
+ index = ExprNodes.TupleNode(pos, args = indexes)
+ result = ExprNodes.IndexNode(pos,
+ base = base, index = index)
+ s.expect(']')
+ return result
+
+def p_subscript_list(s):
+ is_single_value = True
+ items = [p_subscript(s)]
+ while s.sy == ',':
+ is_single_value = False
+ s.next()
+ if s.sy == ']':
+ break
+ items.append(p_subscript(s))
+ return items, is_single_value
+
+#subscript: '.' '.' '.' | test | [test] ':' [test] [':' [test]]
+
+def p_subscript(s):
+ # Parse a subscript and return a list of
+ # 1, 2 or 3 ExprNodes, depending on how
+ # many slice elements were encountered.
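+    # Illustrative results (positions elided):
+    #     x[i]      -> [i]
+    #     x[i:j]    -> [i, j]
+    #     x[i:j:k]  -> [i, j, k]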
+ pos = s.position()
+ start = p_slice_element(s, (':',))
+ if s.sy != ':':
+ return [start]
+ s.next()
+ stop = p_slice_element(s, (':', ',', ']'))
+ if s.sy != ':':
+ return [start, stop]
+ s.next()
+ step = p_slice_element(s, (':', ',', ']'))
+ return [start, stop, step]
+
+def p_slice_element(s, follow_set):
+    # Simple expression which may be omitted if
+    # it is followed by something in follow_set.
+ if s.sy not in follow_set:
+ return p_test(s)
+ else:
+ return None
+
+def expect_ellipsis(s):
+ s.expect('.')
+ s.expect('.')
+ s.expect('.')
+
+def make_slice_nodes(pos, subscripts):
+ # Convert a list of subscripts as returned
+ # by p_subscript_list into a list of ExprNodes,
+ # creating SliceNodes for elements with 2 or
+ # more components.
+ result = []
+ for subscript in subscripts:
+ if len(subscript) == 1:
+ result.append(subscript[0])
+ else:
+ result.append(make_slice_node(pos, *subscript))
+ return result
+
+def make_slice_node(pos, start, stop = None, step = None):
+ if not start:
+ start = ExprNodes.NoneNode(pos)
+ if not stop:
+ stop = ExprNodes.NoneNode(pos)
+ if not step:
+ step = ExprNodes.NoneNode(pos)
+ return ExprNodes.SliceNode(pos,
+ start = start, stop = stop, step = step)
+
+#atom: '(' [yield_expr|testlist_comp] ')' | '[' [listmaker] ']' | '{' [dict_or_set_maker] '}' | '`' testlist '`' | NAME | NUMBER | STRING+
+
+def p_atom(s):
+ pos = s.position()
+ sy = s.sy
+ if sy == '(':
+ s.next()
+ if s.sy == ')':
+ result = ExprNodes.TupleNode(pos, args = [])
+ elif s.sy == 'yield':
+ result = p_yield_expression(s)
+ else:
+ result = p_testlist_comp(s)
+ s.expect(')')
+ return result
+ elif sy == '[':
+ return p_list_maker(s)
+ elif sy == '{':
+ return p_dict_or_set_maker(s)
+ elif sy == '`':
+ return p_backquote_expr(s)
+ elif sy == '.':
+ expect_ellipsis(s)
+ return ExprNodes.EllipsisNode(pos)
+ elif sy == 'INT':
+ return p_int_literal(s)
+ elif sy == 'FLOAT':
+ value = s.systring
+ s.next()
+ return ExprNodes.FloatNode(pos, value = value)
+ elif sy == 'IMAG':
+ value = s.systring[:-1]
+ s.next()
+ return ExprNodes.ImagNode(pos, value = value)
+ elif sy == 'BEGIN_STRING':
+ kind, bytes_value, unicode_value = p_cat_string_literal(s)
+ if kind == 'c':
+ return ExprNodes.CharNode(pos, value = bytes_value)
+ elif kind == 'u':
+ return ExprNodes.UnicodeNode(pos, value = unicode_value, bytes_value = bytes_value)
+ elif kind == 'b':
+ return ExprNodes.BytesNode(pos, value = bytes_value)
+ elif kind == 'f':
+ return ExprNodes.JoinedStrNode(pos, values = unicode_value)
+ elif kind == '':
+ return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value)
+ else:
+ s.error("invalid string kind '%s'" % kind)
+ elif sy == 'IDENT':
+ name = s.systring
+ if name == "None":
+ result = ExprNodes.NoneNode(pos)
+ elif name == "True":
+ result = ExprNodes.BoolNode(pos, value=True)
+ elif name == "False":
+ result = ExprNodes.BoolNode(pos, value=False)
+ elif name == "NULL" and not s.in_python_file:
+ result = ExprNodes.NullNode(pos)
+ else:
+ result = p_name(s, name)
+ s.next()
+ return result
+ else:
+ s.error("Expected an identifier or literal")
+
+def p_int_literal(s):
+ pos = s.position()
+ value = s.systring
+ s.next()
+ unsigned = ""
+ longness = ""
+ while value[-1] in u"UuLl":
+ if value[-1] in u"Ll":
+ longness += "L"
+ else:
+ unsigned += "U"
+ value = value[:-1]
+ # '3L' is ambiguous in Py2 but not in Py3. '3U' and '3LL' are
+ # illegal in Py2 Python files. All suffixes are illegal in Py3
+ # Python files.
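+    # Illustrative suffix stripping (a sketch): '3UL' yields value '3',
+    # unsigned 'U' and longness 'L'; suffixes are consumed right-to-left.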
+ is_c_literal = None
+ if unsigned:
+ is_c_literal = True
+ elif longness:
+ if longness == 'LL' or s.context.language_level >= 3:
+ is_c_literal = True
+ if s.in_python_file:
+ if is_c_literal:
+ error(pos, "illegal integer literal syntax in Python source file")
+ is_c_literal = False
+ return ExprNodes.IntNode(pos,
+ is_c_literal = is_c_literal,
+ value = value,
+ unsigned = unsigned,
+ longness = longness)
+
+
+def p_name(s, name):
+ pos = s.position()
+ if not s.compile_time_expr and name in s.compile_time_env:
+ value = s.compile_time_env.lookup_here(name)
+ node = wrap_compile_time_constant(pos, value)
+ if node is not None:
+ return node
+ return ExprNodes.NameNode(pos, name=name)
+
+
+def wrap_compile_time_constant(pos, value):
+ rep = repr(value)
+ if value is None:
+ return ExprNodes.NoneNode(pos)
+ elif value is Ellipsis:
+ return ExprNodes.EllipsisNode(pos)
+ elif isinstance(value, bool):
+ return ExprNodes.BoolNode(pos, value=value)
+ elif isinstance(value, int):
+ return ExprNodes.IntNode(pos, value=rep, constant_result=value)
+ elif isinstance(value, float):
+ return ExprNodes.FloatNode(pos, value=rep, constant_result=value)
+ elif isinstance(value, complex):
+ node = ExprNodes.ImagNode(pos, value=repr(value.imag), constant_result=complex(0.0, value.imag))
+ if value.real:
+ # FIXME: should we care about -0.0 ?
+ # probably not worth using the '-' operator for negative imag values
+ node = ExprNodes.binop_node(
+ pos, '+', ExprNodes.FloatNode(pos, value=repr(value.real), constant_result=value.real), node,
+ constant_result=value)
+ return node
+ elif isinstance(value, _unicode):
+ return ExprNodes.UnicodeNode(pos, value=EncodedString(value))
+ elif isinstance(value, _bytes):
+ bvalue = bytes_literal(value, 'ascii') # actually: unknown encoding, but BytesLiteral requires one
+ return ExprNodes.BytesNode(pos, value=bvalue, constant_result=value)
+ elif isinstance(value, tuple):
+ args = [wrap_compile_time_constant(pos, arg)
+ for arg in value]
+ if None not in args:
+ return ExprNodes.TupleNode(pos, args=args)
+ else:
+ # error already reported
+ return None
+ elif not _IS_PY3 and isinstance(value, long):
+ return ExprNodes.IntNode(pos, value=rep.rstrip('L'), constant_result=value)
+ error(pos, "Invalid type for compile-time constant: %r (type %s)"
+ % (value, value.__class__.__name__))
+ return None
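+
+# Illustrative results of wrap_compile_time_constant (a sketch):
+#     wrap_compile_time_constant(pos, 42)     -> IntNode(value='42')
+#     wrap_compile_time_constant(pos, u'ab')  -> UnicodeNode(value=u'ab')
+#     wrap_compile_time_constant(pos, (1, 2)) -> TupleNode of two IntNodes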
+
+
+def p_cat_string_literal(s):
+ # A sequence of one or more adjacent string literals.
+ # Returns (kind, bytes_value, unicode_value)
+ # where kind in ('b', 'c', 'u', 'f', '')
+ pos = s.position()
+ kind, bytes_value, unicode_value = p_string_literal(s)
+ if kind == 'c' or s.sy != 'BEGIN_STRING':
+ return kind, bytes_value, unicode_value
+ bstrings, ustrings, positions = [bytes_value], [unicode_value], [pos]
+ bytes_value = unicode_value = None
+ while s.sy == 'BEGIN_STRING':
+ pos = s.position()
+ next_kind, next_bytes_value, next_unicode_value = p_string_literal(s)
+ if next_kind == 'c':
+ error(pos, "Cannot concatenate char literal with another string or char literal")
+ continue
+ elif next_kind != kind:
+ # concatenating f strings and normal strings is allowed and leads to an f string
+ if set([kind, next_kind]) in (set(['f', 'u']), set(['f', ''])):
+ kind = 'f'
+ else:
+ error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" % (
+ kind, next_kind))
+ continue
+ bstrings.append(next_bytes_value)
+ ustrings.append(next_unicode_value)
+ positions.append(pos)
+ # join and rewrap the partial literals
+    if kind in ('b', 'c', '') or (kind == 'u' and None not in bstrings):
+ # Py3 enforced unicode literals are parsed as bytes/unicode combination
+ bytes_value = bytes_literal(StringEncoding.join_bytes(bstrings), s.source_encoding)
+ if kind in ('u', ''):
+ unicode_value = EncodedString(u''.join([u for u in ustrings if u is not None]))
+ if kind == 'f':
+ unicode_value = []
+ for u, pos in zip(ustrings, positions):
+ if isinstance(u, list):
+ unicode_value += u
+ else:
+ # non-f-string concatenated into the f-string
+ unicode_value.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u)))
+ return kind, bytes_value, unicode_value
+
+
+def p_opt_string_literal(s, required_type='u'):
+ if s.sy != 'BEGIN_STRING':
+ return None
+ pos = s.position()
+ kind, bytes_value, unicode_value = p_string_literal(s, required_type)
+ if required_type == 'u':
+ if kind == 'f':
+ s.error("f-string not allowed here", pos)
+ return unicode_value
+ elif required_type == 'b':
+ return bytes_value
+ else:
+ s.error("internal parser configuration error")
+
+
+def check_for_non_ascii_characters(string):
+ for c in string:
+ if c >= u'\x80':
+ return True
+ return False
+
+
+def p_string_literal(s, kind_override=None):
+ # A single string or char literal. Returns (kind, bvalue, uvalue)
+ # where kind in ('b', 'c', 'u', 'f', ''). The 'bvalue' is the source
+ # code byte sequence of the string literal, 'uvalue' is the
+ # decoded Unicode string. Either of the two may be None depending
+ # on the 'kind' of string, only unprefixed strings have both
+ # representations. In f-strings, the uvalue is a list of the Unicode
+ # strings and f-string expressions that make up the f-string.
+
+ # s.sy == 'BEGIN_STRING'
+ pos = s.position()
+ is_python3_source = s.context.language_level >= 3
+ has_non_ascii_literal_characters = False
+ string_start_pos = (pos[0], pos[1], pos[2] + len(s.systring))
+ kind_string = s.systring.rstrip('"\'').lower()
+ if len(kind_string) > 1:
+ if len(set(kind_string)) != len(kind_string):
+ error(pos, 'Duplicate string prefix character')
+ if 'b' in kind_string and 'u' in kind_string:
+ error(pos, 'String prefixes b and u cannot be combined')
+ if 'b' in kind_string and 'f' in kind_string:
+ error(pos, 'String prefixes b and f cannot be combined')
+ if 'u' in kind_string and 'f' in kind_string:
+ error(pos, 'String prefixes u and f cannot be combined')
+
+ is_raw = 'r' in kind_string
+
+ if 'c' in kind_string:
+ # this should never happen, since the lexer does not allow combining c
+ # with other prefix characters
+ if len(kind_string) != 1:
+ error(pos, 'Invalid string prefix for character literal')
+ kind = 'c'
+ elif 'f' in kind_string:
+ kind = 'f' # u is ignored
+ is_raw = True # postpone the escape resolution
+ elif 'b' in kind_string:
+ kind = 'b'
+ elif 'u' in kind_string:
+ kind = 'u'
+ else:
+ kind = ''
+
+ if kind == '' and kind_override is None and Future.unicode_literals in s.context.future_directives:
+ chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
+ kind = 'u'
+ else:
+ if kind_override is not None and kind_override in 'ub':
+ kind = kind_override
+ if kind in ('u', 'f'): # f-strings are scanned exactly like Unicode literals, but are parsed further later
+ chars = StringEncoding.UnicodeLiteralBuilder()
+ elif kind == '':
+ chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
+ else:
+ chars = StringEncoding.BytesLiteralBuilder(s.source_encoding)
+
+ while 1:
+ s.next()
+ sy = s.sy
+ systr = s.systring
+ # print "p_string_literal: sy =", sy, repr(s.systring) ###
+ if sy == 'CHARS':
+ chars.append(systr)
+ if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr):
+ has_non_ascii_literal_characters = True
+ elif sy == 'ESCAPE':
+ # in Py2, 'ur' raw unicode strings resolve unicode escapes but nothing else
+ if is_raw and (is_python3_source or kind != 'u' or systr[1] not in u'Uu'):
+ chars.append(systr)
+ if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr):
+ has_non_ascii_literal_characters = True
+ else:
+ _append_escape_sequence(kind, chars, systr, s)
+ elif sy == 'NEWLINE':
+ chars.append(u'\n')
+ elif sy == 'END_STRING':
+ break
+ elif sy == 'EOF':
+ s.error("Unclosed string literal", pos=pos)
+ else:
+ s.error("Unexpected token %r:%r in string literal" % (
+ sy, s.systring))
+
+ if kind == 'c':
+ unicode_value = None
+ bytes_value = chars.getchar()
+ if len(bytes_value) != 1:
+ error(pos, u"invalid character literal: %r" % bytes_value)
+ else:
+ bytes_value, unicode_value = chars.getstrings()
+ if (has_non_ascii_literal_characters
+ and is_python3_source and Future.unicode_literals in s.context.future_directives):
+ # Python 3 forbids literal non-ASCII characters in byte strings
+ if kind == 'b':
+ s.error("bytes can only contain ASCII literal characters.", pos=pos)
+ bytes_value = None
+ if kind == 'f':
+ unicode_value = p_f_string(s, unicode_value, string_start_pos, is_raw='r' in kind_string)
+ s.next()
+ return (kind, bytes_value, unicode_value)
+
+
+def _append_escape_sequence(kind, builder, escape_sequence, s):
+ c = escape_sequence[1]
+ if c in u"01234567":
+ builder.append_charval(int(escape_sequence[1:], 8))
+ elif c in u"'\"\\":
+ builder.append(c)
+ elif c in u"abfnrtv":
+ builder.append(StringEncoding.char_from_escape_sequence(escape_sequence))
+ elif c == u'\n':
+ pass # line continuation
+ elif c == u'x': # \xXX
+ if len(escape_sequence) == 4:
+ builder.append_charval(int(escape_sequence[2:], 16))
+ else:
+ s.error("Invalid hex escape '%s'" % escape_sequence, fatal=False)
+ elif c in u'NUu' and kind in ('u', 'f', ''): # \uxxxx, \Uxxxxxxxx, \N{...}
+ chrval = -1
+ if c == u'N':
+ uchar = None
+ try:
+ uchar = lookup_unicodechar(escape_sequence[3:-1])
+ chrval = ord(uchar)
+ except KeyError:
+ s.error("Unknown Unicode character name %s" %
+ repr(escape_sequence[3:-1]).lstrip('u'), fatal=False)
+ except TypeError:
+ # 2-byte unicode build of CPython?
+ if (uchar is not None and _IS_2BYTE_UNICODE and len(uchar) == 2 and
+ unicode_category(uchar[0]) == 'Cs' and unicode_category(uchar[1]) == 'Cs'):
+ # surrogate pair instead of single character
+                    chrval = 0x10000 + ((ord(uchar[0]) - 0xd800) << 10) + (ord(uchar[1]) - 0xdc00)
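+                    # e.g. (illustrative) U+10400 arrives as the pair D801 DC00:
+                    # 0x10000 + (0x0001 << 10) + 0x0000 == 0x10400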
+ else:
+ raise
+ elif len(escape_sequence) in (6, 10):
+ chrval = int(escape_sequence[2:], 16)
+            if chrval > 1114111:  # sys.maxunicode
+ s.error("Invalid unicode escape '%s'" % escape_sequence)
+ chrval = -1
+ else:
+ s.error("Invalid unicode escape '%s'" % escape_sequence, fatal=False)
+ if chrval >= 0:
+ builder.append_uescape(chrval, escape_sequence)
+ else:
+ builder.append(escape_sequence)
+
+
+_parse_escape_sequences_raw, _parse_escape_sequences = [re.compile((
+ # escape sequences:
+ br'(\\(?:' +
+ (br'\\?' if is_raw else (
+ br'[\\abfnrtv"\'{]|'
+ br'[0-7]{2,3}|'
+ br'N\{[^}]*\}|'
+ br'x[0-9a-fA-F]{2}|'
+ br'u[0-9a-fA-F]{4}|'
+ br'U[0-9a-fA-F]{8}|'
+ br'[NxuU]|' # detect invalid escape sequences that do not match above
+ )) +
+ br')?|'
+ # non-escape sequences:
+ br'\{\{?|'
+ br'\}\}?|'
+ br'[^\\{}]+)'
+ ).decode('us-ascii')).match
+ for is_raw in (True, False)]
+
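+# Illustrative behaviour (editor's addition): each call of the non-raw matcher
+# consumes one escape sequence, one (possibly doubled) brace, or a run of
+# plain characters, e.g. (expected matches, not part of the original source)
+#     _parse_escape_sequences(u'a\\n{x}', 0).group()  ->  u'a'
+#     _parse_escape_sequences(u'a\\n{x}', 1).group()  ->  u'\\n'
+#     _parse_escape_sequences(u'a\\n{x}', 3).group()  ->  u'{'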
+
+def _f_string_error_pos(pos, string, i):
+ return (pos[0], pos[1], pos[2] + i + 1) # FIXME: handle newlines in string
+
+
+def p_f_string(s, unicode_value, pos, is_raw):
+ # Parses a PEP 498 f-string literal into a list of nodes. Nodes are either UnicodeNodes
+ # or FormattedValueNodes.
+ values = []
+ next_start = 0
+ size = len(unicode_value)
+ builder = StringEncoding.UnicodeLiteralBuilder()
+ _parse_seq = _parse_escape_sequences_raw if is_raw else _parse_escape_sequences
+
+ while next_start < size:
+ end = next_start
+ match = _parse_seq(unicode_value, next_start)
+ if match is None:
+ error(_f_string_error_pos(pos, unicode_value, next_start), "Invalid escape sequence")
+
+ next_start = match.end()
+ part = match.group()
+ c = part[0]
+ if c == '\\':
+ if not is_raw and len(part) > 1:
+ _append_escape_sequence('f', builder, part, s)
+ else:
+ builder.append(part)
+ elif c == '{':
+ if part == '{{':
+ builder.append('{')
+ else:
+ # start of an expression
+ if builder.chars:
+ values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
+ builder = StringEncoding.UnicodeLiteralBuilder()
+ next_start, expr_node = p_f_string_expr(s, unicode_value, pos, next_start, is_raw)
+ values.append(expr_node)
+ elif c == '}':
+ if part == '}}':
+ builder.append('}')
+ else:
+ error(_f_string_error_pos(pos, unicode_value, end),
+ "f-string: single '}' is not allowed")
+ else:
+ builder.append(part)
+
+ if builder.chars:
+ values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
+ return values
+
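+# Illustrative note (editor's addition): for an f-string body like u'x={a!r}!',
+# p_f_string() would yield roughly
+#     [UnicodeNode(value=u'x='), FormattedValueNode(...), UnicodeNode(value=u'!')]
+# with literal text collected into UnicodeNodes and each {...} field handed
+# off to p_f_string_expr().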
+
+def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw):
+ # Parses a {}-delimited expression inside an f-string. Returns the index in
+ # the string that follows the expression and the parsed FormattedValueNode.
+ i = starting_index
+ size = len(unicode_value)
+ conversion_char = terminal_char = format_spec = None
+ format_spec_str = None
+ NO_CHAR = 2**30
+
+ nested_depth = 0
+ quote_char = NO_CHAR
+ in_triple_quotes = False
+ backslash_reported = False
+
+ while True:
+ if i >= size:
+ break # error will be reported below
+ c = unicode_value[i]
+
+ if quote_char != NO_CHAR:
+ if c == '\\':
+ # avoid redundant error reports along '\' sequences
+ if not backslash_reported:
+ error(_f_string_error_pos(pos, unicode_value, i),
+ "backslashes not allowed in f-strings")
+ backslash_reported = True
+ elif c == quote_char:
+ if in_triple_quotes:
+ if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c:
+ in_triple_quotes = False
+ quote_char = NO_CHAR
+ i += 2
+ else:
+ quote_char = NO_CHAR
+ elif c in '\'"':
+ quote_char = c
+ if i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c:
+ in_triple_quotes = True
+ i += 2
+ elif c in '{[(':
+ nested_depth += 1
+ elif nested_depth != 0 and c in '}])':
+ nested_depth -= 1
+ elif c == '#':
+ error(_f_string_error_pos(pos, unicode_value, i),
+ "format string cannot include #")
+ elif nested_depth == 0 and c in '!:}':
+ # allow != as a special case
+ if c == '!' and i + 1 < size and unicode_value[i + 1] == '=':
+ i += 1
+ continue
+
+ terminal_char = c
+ break
+ i += 1
+
+ # normalise line endings as the parser expects that
+ expr_str = unicode_value[starting_index:i].replace('\r\n', '\n').replace('\r', '\n')
+ expr_pos = (pos[0], pos[1], pos[2] + starting_index + 2) # TODO: find exact code position (concat, multi-line, ...)
+
+ if not expr_str.strip():
+ error(_f_string_error_pos(pos, unicode_value, starting_index),
+ "empty expression not allowed in f-string")
+
+ if terminal_char == '!':
+ i += 1
+ if i + 2 > size:
+ pass # error will be reported below
+ else:
+ conversion_char = unicode_value[i]
+ i += 1
+ terminal_char = unicode_value[i]
+
+ if terminal_char == ':':
+ in_triple_quotes = False
+ in_string = False
+ nested_depth = 0
+ start_format_spec = i + 1
+ while True:
+ if i >= size:
+ break # error will be reported below
+ c = unicode_value[i]
+ if not in_triple_quotes and not in_string:
+ if c == '{':
+ nested_depth += 1
+ elif c == '}':
+ if nested_depth > 0:
+ nested_depth -= 1
+ else:
+ terminal_char = c
+ break
+ if c in '\'"':
+ if not in_string and i + 2 < size and unicode_value[i + 1] == c and unicode_value[i + 2] == c:
+ in_triple_quotes = not in_triple_quotes
+ i += 2
+ elif not in_triple_quotes:
+ in_string = not in_string
+ i += 1
+
+ format_spec_str = unicode_value[start_format_spec:i]
+
+ if terminal_char != '}':
+ error(_f_string_error_pos(pos, unicode_value, i),
+ "missing '}' in format string expression" + (
+ ", found '%s'" % terminal_char if terminal_char else ""))
+
+ # parse the expression as if it was surrounded by parentheses
+ buf = StringIO('(%s)' % expr_str)
+ scanner = PyrexScanner(buf, expr_pos[0], parent_scanner=s, source_encoding=s.source_encoding, initial_pos=expr_pos)
+ expr = p_testlist(scanner) # TODO is testlist right here?
+
+ # validate the conversion char
+ if conversion_char is not None and not ExprNodes.FormattedValueNode.find_conversion_func(conversion_char):
+ error(expr_pos, "invalid conversion character '%s'" % conversion_char)
+
+ # the format spec is itself treated like an f-string
+ if format_spec_str:
+ format_spec = ExprNodes.JoinedStrNode(pos, values=p_f_string(s, format_spec_str, pos, is_raw))
+
+ return i + 1, ExprNodes.FormattedValueNode(
+ pos, value=expr, conversion_char=conversion_char, format_spec=format_spec)
+
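+# Worked example (editor's sketch): given the remainder u'a!r:>4}' with
+# starting_index at 'a', the scan stops at '!', conversion_char becomes 'r',
+# the ':' branch collects format_spec_str == u'>4', and the function returns
+# the index just past the '}' together with a FormattedValueNode whose
+# format_spec is the JoinedStrNode parsed from u'>4'.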
+
+# since PEP 448:
+# list_display ::= "[" [listmaker] "]"
+# listmaker ::= (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+# comp_iter ::= comp_for | comp_if
+# comp_for ::= ["async"] "for" expression_list "in" testlist [comp_iter]
+# comp_if ::= "if" test [comp_iter]
+
+def p_list_maker(s):
+ # s.sy == '['
+ pos = s.position()
+ s.next()
+ if s.sy == ']':
+ s.expect(']')
+ return ExprNodes.ListNode(pos, args=[])
+
+ expr = p_test_or_starred_expr(s)
+ if s.sy in ('for', 'async'):
+ if expr.is_starred:
+ s.error("iterable unpacking cannot be used in comprehension")
+ append = ExprNodes.ComprehensionAppendNode(pos, expr=expr)
+ loop = p_comp_for(s, append)
+ s.expect(']')
+ return ExprNodes.ComprehensionNode(
+ pos, loop=loop, append=append, type=Builtin.list_type,
+ # list comprehensions leak their loop variable in Py2
+ has_local_scope=s.context.language_level >= 3)
+
+ # (merged) list literal
+ if s.sy == ',':
+ s.next()
+ exprs = p_test_or_starred_expr_list(s, expr)
+ else:
+ exprs = [expr]
+ s.expect(']')
+ return ExprNodes.ListNode(pos, args=exprs)
+
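+# Illustrative note (editor's addition): '[]' parses to ListNode(args=[]),
+# '[1, 2]' to a plain ListNode, and '[x for x in seq]' to a ComprehensionNode
+# that appends into a list; has_local_scope is set at language level 3, where
+# the loop variable no longer leaks.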
+
+def p_comp_iter(s, body):
+ if s.sy in ('for', 'async'):
+ return p_comp_for(s, body)
+ elif s.sy == 'if':
+ return p_comp_if(s, body)
+ else:
+ # insert the 'append' operation into the loop
+ return body
+
+def p_comp_for(s, body):
+ pos = s.position()
+ # [async] for ...
+ is_async = False
+ if s.sy == 'async':
+ is_async = True
+ s.next()
+
+ # s.sy == 'for'
+ s.expect('for')
+ kw = p_for_bounds(s, allow_testlist=False, is_async=is_async)
+ kw.update(else_clause=None, body=p_comp_iter(s, body), is_async=is_async)
+ return Nodes.ForStatNode(pos, **kw)
+
+def p_comp_if(s, body):
+ # s.sy == 'if'
+ pos = s.position()
+ s.next()
+ test = p_test_nocond(s)
+ return Nodes.IfStatNode(pos,
+ if_clauses = [Nodes.IfClauseNode(pos, condition = test,
+ body = p_comp_iter(s, body))],
+ else_clause = None )
+
+
+# since PEP 448:
+#dictorsetmaker: ( ((test ':' test | '**' expr)
+# (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+# ((test | star_expr)
+# (comp_for | (',' (test | star_expr))* [','])) )
+
+def p_dict_or_set_maker(s):
+ # s.sy == '{'
+ pos = s.position()
+ s.next()
+ if s.sy == '}':
+ s.next()
+ return ExprNodes.DictNode(pos, key_value_pairs=[])
+
+ parts = []
+ target_type = 0
+ last_was_simple_item = False
+ while True:
+ if s.sy in ('*', '**'):
+ # merged set/dict literal
+ if target_type == 0:
+ target_type = 1 if s.sy == '*' else 2 # 'stars'
+ elif target_type != len(s.sy):
+ s.error("unexpected %sitem found in %s literal" % (
+ s.sy, 'set' if target_type == 1 else 'dict'))
+ s.next()
+ if s.sy == '*':
+ s.error("expected expression, found '*'")
+ item = p_starred_expr(s)
+ parts.append(item)
+ last_was_simple_item = False
+ else:
+ item = p_test(s)
+ if target_type == 0:
+ target_type = 2 if s.sy == ':' else 1 # dict vs. set
+ if target_type == 2:
+ # dict literal
+ s.expect(':')
+ key = item
+ value = p_test(s)
+ item = ExprNodes.DictItemNode(key.pos, key=key, value=value)
+ if last_was_simple_item:
+ parts[-1].append(item)
+ else:
+ parts.append([item])
+ last_was_simple_item = True
+
+ if s.sy == ',':
+ s.next()
+ if s.sy == '}':
+ break
+ else:
+ break
+
+ if s.sy in ('for', 'async'):
+ # dict/set comprehension
+ if len(parts) == 1 and isinstance(parts[0], list) and len(parts[0]) == 1:
+ item = parts[0][0]
+ if target_type == 2:
+ assert isinstance(item, ExprNodes.DictItemNode), type(item)
+ comprehension_type = Builtin.dict_type
+ append = ExprNodes.DictComprehensionAppendNode(
+ item.pos, key_expr=item.key, value_expr=item.value)
+ else:
+ comprehension_type = Builtin.set_type
+ append = ExprNodes.ComprehensionAppendNode(item.pos, expr=item)
+ loop = p_comp_for(s, append)
+ s.expect('}')
+ return ExprNodes.ComprehensionNode(pos, loop=loop, append=append, type=comprehension_type)
+ else:
+ # syntax error, try to find a good error message
+ if len(parts) == 1 and not isinstance(parts[0], list):
+ s.error("iterable unpacking cannot be used in comprehension")
+ else:
+ # e.g. "{1,2,3 for ..."
+ s.expect('}')
+ return ExprNodes.DictNode(pos, key_value_pairs=[])
+
+ s.expect('}')
+ if target_type == 1:
+ # (merged) set literal
+ items = []
+ set_items = []
+ for part in parts:
+ if isinstance(part, list):
+ set_items.extend(part)
+ else:
+ if set_items:
+ items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items))
+ set_items = []
+ items.append(part)
+ if set_items:
+ items.append(ExprNodes.SetNode(set_items[0].pos, args=set_items))
+ if len(items) == 1 and items[0].is_set_literal:
+ return items[0]
+ return ExprNodes.MergedSequenceNode(pos, args=items, type=Builtin.set_type)
+ else:
+ # (merged) dict literal
+ items = []
+ dict_items = []
+ for part in parts:
+ if isinstance(part, list):
+ dict_items.extend(part)
+ else:
+ if dict_items:
+ items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items))
+ dict_items = []
+ items.append(part)
+ if dict_items:
+ items.append(ExprNodes.DictNode(dict_items[0].pos, key_value_pairs=dict_items))
+ if len(items) == 1 and items[0].is_dict_literal:
+ return items[0]
+ return ExprNodes.MergedDictNode(pos, keyword_args=items, reject_duplicates=False)
+
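+# Illustrative note (editor's addition): '{1, 2}' collapses to a single
+# SetNode and '{1: 2}' to a DictNode, while star-unpacking forms such as
+# '{*a, 1}' or '{**d, 1: 2}' become a MergedSequenceNode / MergedDictNode
+# with literal runs and unpacked parts kept in source order.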
+
+# NOTE: no longer in Py3 :)
+def p_backquote_expr(s):
+ # s.sy == '`'
+ pos = s.position()
+ s.next()
+ args = [p_test(s)]
+ while s.sy == ',':
+ s.next()
+ args.append(p_test(s))
+ s.expect('`')
+ if len(args) == 1:
+ arg = args[0]
+ else:
+ arg = ExprNodes.TupleNode(pos, args = args)
+ return ExprNodes.BackquoteNode(pos, arg = arg)
+
+def p_simple_expr_list(s, expr=None):
+ exprs = [expr] if expr is not None else []
+ while s.sy not in expr_terminators:
+ exprs.append( p_test(s) )
+ if s.sy != ',':
+ break
+ s.next()
+ return exprs
+
+
+def p_test_or_starred_expr_list(s, expr=None):
+ exprs = [expr] if expr is not None else []
+ while s.sy not in expr_terminators:
+ exprs.append(p_test_or_starred_expr(s))
+ if s.sy != ',':
+ break
+ s.next()
+ return exprs
+
+
+#testlist: test (',' test)* [',']
+
+def p_testlist(s):
+ pos = s.position()
+ expr = p_test(s)
+ if s.sy == ',':
+ s.next()
+ exprs = p_simple_expr_list(s, expr)
+ return ExprNodes.TupleNode(pos, args = exprs)
+ else:
+ return expr
+
+# testlist_star_expr: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+
+def p_testlist_star_expr(s):
+ pos = s.position()
+ expr = p_test_or_starred_expr(s)
+ if s.sy == ',':
+ s.next()
+ exprs = p_test_or_starred_expr_list(s, expr)
+ return ExprNodes.TupleNode(pos, args = exprs)
+ else:
+ return expr
+
+# testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+
+def p_testlist_comp(s):
+ pos = s.position()
+ expr = p_test_or_starred_expr(s)
+ if s.sy == ',':
+ s.next()
+ exprs = p_test_or_starred_expr_list(s, expr)
+ return ExprNodes.TupleNode(pos, args = exprs)
+ elif s.sy in ('for', 'async'):
+ return p_genexp(s, expr)
+ else:
+ return expr
+
+def p_genexp(s, expr):
+ # s.sy == 'async' | 'for'
+ loop = p_comp_for(s, Nodes.ExprStatNode(
+ expr.pos, expr = ExprNodes.YieldExprNode(expr.pos, arg=expr)))
+ return ExprNodes.GeneratorExpressionNode(expr.pos, loop=loop)
+
+expr_terminators = cython.declare(set, set([
+ ')', ']', '}', ':', '=', 'NEWLINE']))
+
+
+#-------------------------------------------------------
+#
+# Statements
+#
+#-------------------------------------------------------
+
+def p_global_statement(s):
+ # assume s.sy == 'global'
+ pos = s.position()
+ s.next()
+ names = p_ident_list(s)
+ return Nodes.GlobalNode(pos, names = names)
+
+
+def p_nonlocal_statement(s):
+ pos = s.position()
+ s.next()
+ names = p_ident_list(s)
+ return Nodes.NonlocalNode(pos, names = names)
+
+
+def p_expression_or_assignment(s):
+ expr = p_testlist_star_expr(s)
+ if s.sy == ':' and (expr.is_name or expr.is_subscript or expr.is_attribute):
+ s.next()
+ expr.annotation = p_test(s)
+ if s.sy == '=' and expr.is_starred:
+ # This is a common enough mistake when learning Cython, so fail
+ # as early as possible and give a very clear error message.
+ s.error("a starred assignment target must be in a list or tuple"
+ " - maybe you meant to use an index assignment: var[0] = ...",
+ pos=expr.pos)
+ expr_list = [expr]
+ while s.sy == '=':
+ s.next()
+ if s.sy == 'yield':
+ expr = p_yield_expression(s)
+ else:
+ expr = p_testlist_star_expr(s)
+ expr_list.append(expr)
+ if len(expr_list) == 1:
+ if re.match(r"([-+*/%^&|]|<<|>>|\*\*|//|@)=", s.sy):
+ lhs = expr_list[0]
+ if isinstance(lhs, ExprNodes.SliceIndexNode):
+ # implementation requires IndexNode
+ lhs = ExprNodes.IndexNode(
+ lhs.pos,
+ base=lhs.base,
+ index=make_slice_node(lhs.pos, lhs.start, lhs.stop))
+ elif not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode)):
+ error(lhs.pos, "Illegal operand for inplace operation.")
+ operator = s.sy[:-1]
+ s.next()
+ if s.sy == 'yield':
+ rhs = p_yield_expression(s)
+ else:
+ rhs = p_testlist(s)
+ return Nodes.InPlaceAssignmentNode(lhs.pos, operator=operator, lhs=lhs, rhs=rhs)
+ expr = expr_list[0]
+ return Nodes.ExprStatNode(expr.pos, expr=expr)
+
+ rhs = expr_list[-1]
+ if len(expr_list) == 2:
+ return Nodes.SingleAssignmentNode(rhs.pos, lhs=expr_list[0], rhs=rhs)
+ else:
+ return Nodes.CascadedAssignmentNode(rhs.pos, lhs_list=expr_list[:-1], rhs=rhs)
+
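+# Illustrative note (editor's addition): 'a = 1' produces a
+# SingleAssignmentNode, 'a = b = 1' a CascadedAssignmentNode with
+# lhs_list == [a, b], 'a += 1' an InPlaceAssignmentNode(operator='+'),
+# and a bare expression falls through to a plain ExprStatNode.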
+
+def p_print_statement(s):
+ # s.sy == 'print'
+ pos = s.position()
+ ends_with_comma = 0
+ s.next()
+ if s.sy == '>>':
+ s.next()
+ stream = p_test(s)
+ if s.sy == ',':
+ s.next()
+ ends_with_comma = s.sy in ('NEWLINE', 'EOF')
+ else:
+ stream = None
+ args = []
+ if s.sy not in ('NEWLINE', 'EOF'):
+ args.append(p_test(s))
+ while s.sy == ',':
+ s.next()
+ if s.sy in ('NEWLINE', 'EOF'):
+ ends_with_comma = 1
+ break
+ args.append(p_test(s))
+ arg_tuple = ExprNodes.TupleNode(pos, args=args)
+ return Nodes.PrintStatNode(pos,
+ arg_tuple=arg_tuple, stream=stream,
+ append_newline=not ends_with_comma)
+
+
+def p_exec_statement(s):
+ # s.sy == 'exec'
+ pos = s.position()
+ s.next()
+ code = p_bit_expr(s)
+ if isinstance(code, ExprNodes.TupleNode):
+ # Py3 compatibility syntax
+ tuple_variant = True
+ args = code.args
+ if len(args) not in (2, 3):
+ s.error("expected tuple of length 2 or 3, got length %d" % len(args),
+ pos=pos, fatal=False)
+ args = [code]
+ else:
+ tuple_variant = False
+ args = [code]
+ if s.sy == 'in':
+ if tuple_variant:
+ s.error("tuple variant of exec does not support additional 'in' arguments",
+ fatal=False)
+ s.next()
+ args.append(p_test(s))
+ if s.sy == ',':
+ s.next()
+ args.append(p_test(s))
+ return Nodes.ExecStatNode(pos, args=args)
+
+def p_del_statement(s):
+ # s.sy == 'del'
+ pos = s.position()
+ s.next()
+ # FIXME: 'exprlist' in Python
+ args = p_simple_expr_list(s)
+ return Nodes.DelStatNode(pos, args = args)
+
+def p_pass_statement(s, with_newline = 0):
+ pos = s.position()
+ s.expect('pass')
+ if with_newline:
+ s.expect_newline("Expected a newline", ignore_semicolon=True)
+ return Nodes.PassStatNode(pos)
+
+def p_break_statement(s):
+ # s.sy == 'break'
+ pos = s.position()
+ s.next()
+ return Nodes.BreakStatNode(pos)
+
+def p_continue_statement(s):
+ # s.sy == 'continue'
+ pos = s.position()
+ s.next()
+ return Nodes.ContinueStatNode(pos)
+
+def p_return_statement(s):
+ # s.sy == 'return'
+ pos = s.position()
+ s.next()
+ if s.sy not in statement_terminators:
+ value = p_testlist(s)
+ else:
+ value = None
+ return Nodes.ReturnStatNode(pos, value = value)
+
+def p_raise_statement(s):
+ # s.sy == 'raise'
+ pos = s.position()
+ s.next()
+ exc_type = None
+ exc_value = None
+ exc_tb = None
+ cause = None
+ if s.sy not in statement_terminators:
+ exc_type = p_test(s)
+ if s.sy == ',':
+ s.next()
+ exc_value = p_test(s)
+ if s.sy == ',':
+ s.next()
+ exc_tb = p_test(s)
+ elif s.sy == 'from':
+ s.next()
+ cause = p_test(s)
+ if exc_type or exc_value or exc_tb:
+ return Nodes.RaiseStatNode(pos,
+ exc_type = exc_type,
+ exc_value = exc_value,
+ exc_tb = exc_tb,
+ cause = cause)
+ else:
+ return Nodes.ReraiseStatNode(pos)
+
+
+def p_import_statement(s):
+ # s.sy in ('import', 'cimport')
+ pos = s.position()
+ kind = s.sy
+ s.next()
+ items = [p_dotted_name(s, as_allowed=1)]
+ while s.sy == ',':
+ s.next()
+ items.append(p_dotted_name(s, as_allowed=1))
+ stats = []
+ is_absolute = Future.absolute_import in s.context.future_directives
+ for pos, target_name, dotted_name, as_name in items:
+ if kind == 'cimport':
+ stat = Nodes.CImportStatNode(
+ pos,
+ module_name=dotted_name,
+ as_name=as_name,
+ is_absolute=is_absolute)
+ else:
+ if as_name and "." in dotted_name:
+ name_list = ExprNodes.ListNode(pos, args=[
+ ExprNodes.IdentifierStringNode(pos, value=s.context.intern_ustring("*"))])
+ else:
+ name_list = None
+ stat = Nodes.SingleAssignmentNode(
+ pos,
+ lhs=ExprNodes.NameNode(pos, name=as_name or target_name),
+ rhs=ExprNodes.ImportNode(
+ pos,
+ module_name=ExprNodes.IdentifierStringNode(pos, value=dotted_name),
+ level=0 if is_absolute else None,
+ name_list=name_list))
+ stats.append(stat)
+ return Nodes.StatListNode(pos, stats=stats)
+
+
+def p_from_import_statement(s, first_statement = 0):
+ # s.sy == 'from'
+ pos = s.position()
+ s.next()
+ if s.sy == '.':
+ # count relative import level
+ level = 0
+ while s.sy == '.':
+ level += 1
+ s.next()
+ else:
+ level = None
+ if level is not None and s.sy in ('import', 'cimport'):
+ # we are dealing with "from .. import foo, bar"
+ dotted_name_pos, dotted_name = s.position(), s.context.intern_ustring('')
+ else:
+ if level is None and Future.absolute_import in s.context.future_directives:
+ level = 0
+ (dotted_name_pos, _, dotted_name, _) = p_dotted_name(s, as_allowed=False)
+ if s.sy not in ('import', 'cimport'):
+ s.error("Expected 'import' or 'cimport'")
+ kind = s.sy
+ s.next()
+
+ is_cimport = kind == 'cimport'
+ is_parenthesized = False
+ if s.sy == '*':
+ imported_names = [(s.position(), s.context.intern_ustring("*"), None, None)]
+ s.next()
+ else:
+ if s.sy == '(':
+ is_parenthesized = True
+ s.next()
+ imported_names = [p_imported_name(s, is_cimport)]
+ while s.sy == ',':
+ s.next()
+ if is_parenthesized and s.sy == ')':
+ break
+ imported_names.append(p_imported_name(s, is_cimport))
+ if is_parenthesized:
+ s.expect(')')
+ if dotted_name == '__future__':
+ if not first_statement:
+ s.error("from __future__ imports must occur at the beginning of the file")
+ elif level:
+ s.error("invalid syntax")
+ else:
+ for (name_pos, name, as_name, kind) in imported_names:
+ if name == "braces":
+ s.error("not a chance", name_pos)
+ break
+ try:
+ directive = getattr(Future, name)
+ except AttributeError:
+ s.error("future feature %s is not defined" % name, name_pos)
+ break
+ s.context.future_directives.add(directive)
+ return Nodes.PassStatNode(pos)
+ elif kind == 'cimport':
+ return Nodes.FromCImportStatNode(
+ pos, module_name=dotted_name,
+ relative_level=level,
+ imported_names=imported_names)
+ else:
+ imported_name_strings = []
+ items = []
+ for (name_pos, name, as_name, kind) in imported_names:
+ imported_name_strings.append(
+ ExprNodes.IdentifierStringNode(name_pos, value=name))
+ items.append(
+ (name, ExprNodes.NameNode(name_pos, name=as_name or name)))
+ import_list = ExprNodes.ListNode(
+ imported_names[0][0], args=imported_name_strings)
+ return Nodes.FromImportStatNode(pos,
+ module = ExprNodes.ImportNode(dotted_name_pos,
+ module_name = ExprNodes.IdentifierStringNode(pos, value = dotted_name),
+ level = level,
+ name_list = import_list),
+ items = items)
+
+
+imported_name_kinds = cython.declare(set, set(['class', 'struct', 'union']))
+
+def p_imported_name(s, is_cimport):
+ pos = s.position()
+ kind = None
+ if is_cimport and s.systring in imported_name_kinds:
+ kind = s.systring
+ warning(pos, 'the "from module cimport %s name" syntax is deprecated and '
+ 'will be removed in Cython 3.0' % kind, 2)
+ s.next()
+ name = p_ident(s)
+ as_name = p_as_name(s)
+ return (pos, name, as_name, kind)
+
+
+def p_dotted_name(s, as_allowed):
+ pos = s.position()
+ target_name = p_ident(s)
+ as_name = None
+ names = [target_name]
+ while s.sy == '.':
+ s.next()
+ names.append(p_ident(s))
+ if as_allowed:
+ as_name = p_as_name(s)
+ return (pos, target_name, s.context.intern_ustring(u'.'.join(names)), as_name)
+
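+# Illustrative note (editor's addition): for 'import a.b.c as d' this returns
+# (pos, u'a', u'a.b.c', u'd') -- the first component as target name, the
+# interned full dotted path, and as_name filled in only when as_allowed is
+# true.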
+
+def p_as_name(s):
+ if s.sy == 'IDENT' and s.systring == 'as':
+ s.next()
+ return p_ident(s)
+ else:
+ return None
+
+
+def p_assert_statement(s):
+ # s.sy == 'assert'
+ pos = s.position()
+ s.next()
+ cond = p_test(s)
+ if s.sy == ',':
+ s.next()
+ value = p_test(s)
+ else:
+ value = None
+ return Nodes.AssertStatNode(pos, cond = cond, value = value)
+
+
+statement_terminators = cython.declare(set, set([';', 'NEWLINE', 'EOF']))
+
+def p_if_statement(s):
+ # s.sy == 'if'
+ pos = s.position()
+ s.next()
+ if_clauses = [p_if_clause(s)]
+ while s.sy == 'elif':
+ s.next()
+ if_clauses.append(p_if_clause(s))
+ else_clause = p_else_clause(s)
+ return Nodes.IfStatNode(pos,
+ if_clauses = if_clauses, else_clause = else_clause)
+
+def p_if_clause(s):
+ pos = s.position()
+ test = p_test(s)
+ body = p_suite(s)
+ return Nodes.IfClauseNode(pos,
+ condition = test, body = body)
+
+def p_else_clause(s):
+ if s.sy == 'else':
+ s.next()
+ return p_suite(s)
+ else:
+ return None
+
+def p_while_statement(s):
+ # s.sy == 'while'
+ pos = s.position()
+ s.next()
+ test = p_test(s)
+ body = p_suite(s)
+ else_clause = p_else_clause(s)
+ return Nodes.WhileStatNode(pos,
+ condition = test, body = body,
+ else_clause = else_clause)
+
+
+def p_for_statement(s, is_async=False):
+ # s.sy == 'for'
+ pos = s.position()
+ s.next()
+ kw = p_for_bounds(s, allow_testlist=True, is_async=is_async)
+ body = p_suite(s)
+ else_clause = p_else_clause(s)
+ kw.update(body=body, else_clause=else_clause, is_async=is_async)
+ return Nodes.ForStatNode(pos, **kw)
+
+
+def p_for_bounds(s, allow_testlist=True, is_async=False):
+ target = p_for_target(s)
+ if s.sy == 'in':
+ s.next()
+ iterator = p_for_iterator(s, allow_testlist, is_async=is_async)
+ return dict(target=target, iterator=iterator)
+ elif not s.in_python_file and not is_async:
+ if s.sy == 'from':
+ s.next()
+ bound1 = p_bit_expr(s)
+ else:
+ # Support shorter "for a <= x < b" syntax
+ bound1, target = target, None
+ rel1 = p_for_from_relation(s)
+ name2_pos = s.position()
+ name2 = p_ident(s)
+ rel2_pos = s.position()
+ rel2 = p_for_from_relation(s)
+ bound2 = p_bit_expr(s)
+ step = p_for_from_step(s)
+ if target is None:
+ target = ExprNodes.NameNode(name2_pos, name = name2)
+ else:
+ if not target.is_name:
+ error(target.pos,
+ "Target of for-from statement must be a variable name")
+ elif name2 != target.name:
+ error(name2_pos,
+ "Variable name in for-from range does not match target")
+ if rel1[0] != rel2[0]:
+ error(rel2_pos,
+ "Relation directions in for-from do not match")
+ return dict(target = target,
+ bound1 = bound1,
+ relation1 = rel1,
+ relation2 = rel2,
+ bound2 = bound2,
+ step = step,
+ )
+ else:
+ s.expect('in')
+ return {}
+
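+# Illustrative note (editor's addition): the legacy Cython-only loop
+# 'for i from 0 <= i < n by 2:' would produce roughly
+#     dict(target=<i>, bound1=<0>, relation1='<=', relation2='<',
+#          bound2=<n>, step=<2>)
+# while the shorter 'for 0 <= i < n:' form reuses the already parsed first
+# expression as bound1.
+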
+def p_for_from_relation(s):
+ if s.sy in inequality_relations:
+ op = s.sy
+ s.next()
+ return op
+ else:
+ s.error("Expected one of '<', '<=', '>' '>='")
+
+def p_for_from_step(s):
+ if s.sy == 'IDENT' and s.systring == 'by':
+ s.next()
+ step = p_bit_expr(s)
+ return step
+ else:
+ return None
+
+inequality_relations = cython.declare(set, set(['<', '<=', '>', '>=']))
+
+def p_target(s, terminator):
+ pos = s.position()
+ expr = p_starred_expr(s)
+ if s.sy == ',':
+ s.next()
+ exprs = [expr]
+ while s.sy != terminator:
+ exprs.append(p_starred_expr(s))
+ if s.sy != ',':
+ break
+ s.next()
+ return ExprNodes.TupleNode(pos, args = exprs)
+ else:
+ return expr
+
+
+def p_for_target(s):
+ return p_target(s, 'in')
+
+
+def p_for_iterator(s, allow_testlist=True, is_async=False):
+ pos = s.position()
+ if allow_testlist:
+ expr = p_testlist(s)
+ else:
+ expr = p_or_test(s)
+ return (ExprNodes.AsyncIteratorNode if is_async else ExprNodes.IteratorNode)(pos, sequence=expr)
+
+
+def p_try_statement(s):
+ # s.sy == 'try'
+ pos = s.position()
+ s.next()
+ body = p_suite(s)
+ except_clauses = []
+ else_clause = None
+ if s.sy in ('except', 'else'):
+ while s.sy == 'except':
+ except_clauses.append(p_except_clause(s))
+ if s.sy == 'else':
+ s.next()
+ else_clause = p_suite(s)
+ body = Nodes.TryExceptStatNode(pos,
+ body = body, except_clauses = except_clauses,
+ else_clause = else_clause)
+ if s.sy != 'finally':
+ return body
+ # try-except-finally is equivalent to nested try-except/try-finally
+ if s.sy == 'finally':
+ s.next()
+ finally_clause = p_suite(s)
+ return Nodes.TryFinallyStatNode(pos,
+ body = body, finally_clause = finally_clause)
+ else:
+ s.error("Expected 'except' or 'finally'")
+
+def p_except_clause(s):
+ # s.sy == 'except'
+ pos = s.position()
+ s.next()
+ exc_type = None
+ exc_value = None
+ is_except_as = False
+ if s.sy != ':':
+ exc_type = p_test(s)
+ # normalise into list of single exception tests
+ if isinstance(exc_type, ExprNodes.TupleNode):
+ exc_type = exc_type.args
+ else:
+ exc_type = [exc_type]
+ if s.sy == ',' or (s.sy == 'IDENT' and s.systring == 'as'
+ and s.context.language_level == 2):
+ s.next()
+ exc_value = p_test(s)
+ elif s.sy == 'IDENT' and s.systring == 'as':
+ # Py3 syntax requires a name here
+ s.next()
+ pos2 = s.position()
+ name = p_ident(s)
+ exc_value = ExprNodes.NameNode(pos2, name = name)
+ is_except_as = True
+ body = p_suite(s)
+ return Nodes.ExceptClauseNode(pos,
+ pattern = exc_type, target = exc_value,
+ body = body, is_except_as=is_except_as)
+
+def p_include_statement(s, ctx):
+ pos = s.position()
+ s.next() # 'include'
+ unicode_include_file_name = p_string_literal(s, 'u')[2]
+ s.expect_newline("Syntax error in include statement")
+ if s.compile_time_eval:
+ include_file_name = unicode_include_file_name
+ include_file_path = s.context.find_include_file(include_file_name, pos)
+ if include_file_path:
+ s.included_files.append(include_file_name)
+ with Utils.open_source_file(include_file_path) as f:
+ if Options.source_root:
+ import os
+ rel_path = os.path.relpath(include_file_path, Options.source_root)
+ else:
+ rel_path = None
+ source_desc = FileSourceDescriptor(include_file_path, rel_path)
+ s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
+ tree = p_statement_list(s2, ctx)
+ return tree
+ else:
+ return None
+ else:
+ return Nodes.PassStatNode(pos)
+
+
+def p_with_statement(s):
+ s.next() # 'with'
+ if s.systring == 'template' and not s.in_python_file:
+ node = p_with_template(s)
+ else:
+ node = p_with_items(s)
+ return node
+
+
+def p_with_items(s, is_async=False):
+ pos = s.position()
+ if not s.in_python_file and s.sy == 'IDENT' and s.systring in ('nogil', 'gil'):
+ if is_async:
+ s.error("with gil/nogil cannot be async")
+ state = s.systring
+ s.next()
+ if s.sy == ',':
+ s.next()
+ body = p_with_items(s)
+ else:
+ body = p_suite(s)
+ return Nodes.GILStatNode(pos, state=state, body=body)
+ else:
+ manager = p_test(s)
+ target = None
+ if s.sy == 'IDENT' and s.systring == 'as':
+ s.next()
+ target = p_starred_expr(s)
+ if s.sy == ',':
+ s.next()
+ body = p_with_items(s, is_async=is_async)
+ else:
+ body = p_suite(s)
+ return Nodes.WithStatNode(pos, manager=manager, target=target, body=body, is_async=is_async)
+
+
+def p_with_template(s):
+ pos = s.position()
+ templates = []
+ s.next()
+ s.expect('[')
+ templates.append(s.systring)
+ s.next()
+ while s.systring == ',':
+ s.next()
+ templates.append(s.systring)
+ s.next()
+ s.expect(']')
+ if s.sy == ':':
+ s.next()
+ s.expect_newline("Syntax error in template function declaration")
+ s.expect_indent()
+ body_ctx = Ctx()
+ body_ctx.templates = templates
+ func_or_var = p_c_func_or_var_declaration(s, pos, body_ctx)
+ s.expect_dedent()
+ return func_or_var
+ else:
+ error(pos, "Syntax error in template function declaration")
+
+def p_simple_statement(s, first_statement = 0):
+ #print "p_simple_statement:", s.sy, s.systring ###
+ if s.sy == 'global':
+ node = p_global_statement(s)
+ elif s.sy == 'nonlocal':
+ node = p_nonlocal_statement(s)
+ elif s.sy == 'print':
+ node = p_print_statement(s)
+ elif s.sy == 'exec':
+ node = p_exec_statement(s)
+ elif s.sy == 'del':
+ node = p_del_statement(s)
+ elif s.sy == 'break':
+ node = p_break_statement(s)
+ elif s.sy == 'continue':
+ node = p_continue_statement(s)
+ elif s.sy == 'return':
+ node = p_return_statement(s)
+ elif s.sy == 'raise':
+ node = p_raise_statement(s)
+ elif s.sy in ('import', 'cimport'):
+ node = p_import_statement(s)
+ elif s.sy == 'from':
+ node = p_from_import_statement(s, first_statement = first_statement)
+ elif s.sy == 'yield':
+ node = p_yield_statement(s)
+ elif s.sy == 'assert':
+ node = p_assert_statement(s)
+ elif s.sy == 'pass':
+ node = p_pass_statement(s)
+ else:
+ node = p_expression_or_assignment(s)
+ return node
+
+def p_simple_statement_list(s, ctx, first_statement = 0):
+ # Parse a series of simple statements on one line
+ # separated by semicolons.
+ stat = p_simple_statement(s, first_statement = first_statement)
+ pos = stat.pos
+ stats = []
+ if not isinstance(stat, Nodes.PassStatNode):
+ stats.append(stat)
+ while s.sy == ';':
+ #print "p_simple_statement_list: maybe more to follow" ###
+ s.next()
+ if s.sy in ('NEWLINE', 'EOF'):
+ break
+ stat = p_simple_statement(s, first_statement = first_statement)
+ if isinstance(stat, Nodes.PassStatNode):
+ continue
+ stats.append(stat)
+ first_statement = False
+
+ if not stats:
+ stat = Nodes.PassStatNode(pos)
+ elif len(stats) == 1:
+ stat = stats[0]
+ else:
+ stat = Nodes.StatListNode(pos, stats = stats)
+
+ if s.sy not in ('NEWLINE', 'EOF'):
+ # provide a better error message for users who accidentally write Cython code in .py files
+ if isinstance(stat, Nodes.ExprStatNode):
+ if stat.expr.is_name and stat.expr.name == 'cdef':
+ s.error("The 'cdef' keyword is only allowed in Cython files (pyx/pxi/pxd)", pos)
+ s.expect_newline("Syntax error in simple statement list")
+
+ return stat
+
+def p_compile_time_expr(s):
+ old = s.compile_time_expr
+ s.compile_time_expr = 1
+ expr = p_testlist(s)
+ s.compile_time_expr = old
+ return expr
+
+def p_DEF_statement(s):
+ pos = s.position()
+ denv = s.compile_time_env
+ s.next() # 'DEF'
+ name = p_ident(s)
+ s.expect('=')
+ expr = p_compile_time_expr(s)
+ if s.compile_time_eval:
+ value = expr.compile_time_value(denv)
+ #print "p_DEF_statement: %s = %r" % (name, value) ###
+ denv.declare(name, value)
+ s.expect_newline("Expected a newline", ignore_semicolon=True)
+ return Nodes.PassStatNode(pos)
+
+def p_IF_statement(s, ctx):
+ pos = s.position()
+ saved_eval = s.compile_time_eval
+ current_eval = saved_eval
+ denv = s.compile_time_env
+ result = None
+ while 1:
+ s.next() # 'IF' or 'ELIF'
+ expr = p_compile_time_expr(s)
+ s.compile_time_eval = current_eval and bool(expr.compile_time_value(denv))
+ body = p_suite(s, ctx)
+ if s.compile_time_eval:
+ result = body
+ current_eval = 0
+ if s.sy != 'ELIF':
+ break
+ if s.sy == 'ELSE':
+ s.next()
+ s.compile_time_eval = current_eval
+ body = p_suite(s, ctx)
+ if current_eval:
+ result = body
+ if not result:
+ result = Nodes.PassStatNode(pos)
+ s.compile_time_eval = saved_eval
+ return result
+
+def p_statement(s, ctx, first_statement = 0):
+ cdef_flag = ctx.cdef_flag
+ decorators = None
+ if s.sy == 'ctypedef':
+ if ctx.level not in ('module', 'module_pxd'):
+ s.error("ctypedef statement not allowed here")
+ #if ctx.api:
+ # error(s.position(), "'api' not allowed with 'ctypedef'")
+ return p_ctypedef_statement(s, ctx)
+ elif s.sy == 'DEF':
+ return p_DEF_statement(s)
+ elif s.sy == 'IF':
+ return p_IF_statement(s, ctx)
+ elif s.sy == '@':
+ if ctx.level not in ('module', 'class', 'c_class', 'function', 'property', 'module_pxd', 'c_class_pxd', 'other'):
+ s.error('decorator not allowed here')
+ s.level = ctx.level
+ decorators = p_decorators(s)
+ if not ctx.allow_struct_enum_decorator and s.sy not in ('def', 'cdef', 'cpdef', 'class', 'async'):
+ if s.sy == 'IDENT' and s.systring == 'async':
+ pass # handled below
+ else:
+ s.error("Decorators can only be followed by functions or classes")
+ elif s.sy == 'pass' and cdef_flag:
+ # empty cdef block
+ return p_pass_statement(s, with_newline=1)
+
+ overridable = 0
+ if s.sy == 'cdef':
+ cdef_flag = 1
+ s.next()
+ elif s.sy == 'cpdef':
+ cdef_flag = 1
+ overridable = 1
+ s.next()
+ if cdef_flag:
+ if ctx.level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'):
+ s.error('cdef statement not allowed here')
+ s.level = ctx.level
+ node = p_cdef_statement(s, ctx(overridable=overridable))
+ if decorators is not None:
+ tup = (Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode)
+ if ctx.allow_struct_enum_decorator:
+ tup += (Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode)
+ if not isinstance(node, tup):
+ s.error("Decorators can only be followed by functions or classes")
+ node.decorators = decorators
+ return node
+ else:
+ if ctx.api:
+ s.error("'api' not allowed with this statement", fatal=False)
+ elif s.sy == 'def':
+ # def statements aren't allowed in pxd files, except
+ # as part of a cdef class
+ if ('pxd' in ctx.level) and (ctx.level != 'c_class_pxd'):
+ s.error('def statement not allowed here')
+ s.level = ctx.level
+ return p_def_statement(s, decorators)
+ elif s.sy == 'class':
+ if ctx.level not in ('module', 'function', 'class', 'other'):
+ s.error("class definition not allowed here")
+ return p_class_statement(s, decorators)
+ elif s.sy == 'include':
+ if ctx.level not in ('module', 'module_pxd'):
+ s.error("include statement not allowed here")
+ return p_include_statement(s, ctx)
+ elif ctx.level == 'c_class' and s.sy == 'IDENT' and s.systring == 'property':
+ return p_property_decl(s)
+ elif s.sy == 'pass' and ctx.level != 'property':
+ return p_pass_statement(s, with_newline=True)
+ else:
+ if ctx.level in ('c_class_pxd', 'property'):
+ node = p_ignorable_statement(s)
+ if node is not None:
+ return node
+ s.error("Executable statement not allowed here")
+ if s.sy == 'if':
+ return p_if_statement(s)
+ elif s.sy == 'while':
+ return p_while_statement(s)
+ elif s.sy == 'for':
+ return p_for_statement(s)
+ elif s.sy == 'try':
+ return p_try_statement(s)
+ elif s.sy == 'with':
+ return p_with_statement(s)
+ elif s.sy == 'async':
+ s.next()
+ return p_async_statement(s, ctx, decorators)
+ else:
+ if s.sy == 'IDENT' and s.systring == 'async':
+ ident_name = s.systring
+ # PEP 492 enables the async/await keywords when it spots "async def ..."
+ s.next()
+ if s.sy == 'def':
+ return p_async_statement(s, ctx, decorators)
+ elif decorators:
+ s.error("Decorators can only be followed by functions or classes")
+ s.put_back('IDENT', ident_name) # re-insert original token
+ return p_simple_statement_list(s, ctx, first_statement=first_statement)
+
+
+def p_statement_list(s, ctx, first_statement = 0):
+ # Parse a series of statements separated by newlines.
+ pos = s.position()
+ stats = []
+ while s.sy not in ('DEDENT', 'EOF'):
+ stat = p_statement(s, ctx, first_statement = first_statement)
+ if isinstance(stat, Nodes.PassStatNode):
+ continue
+ stats.append(stat)
+ first_statement = False
+ if not stats:
+ return Nodes.PassStatNode(pos)
+ elif len(stats) == 1:
+ return stats[0]
+ else:
+ return Nodes.StatListNode(pos, stats = stats)
+
+
+def p_suite(s, ctx=Ctx()):
+ return p_suite_with_docstring(s, ctx, with_doc_only=False)[1]
+
+
+def p_suite_with_docstring(s, ctx, with_doc_only=False):
+ s.expect(':')
+ doc = None
+ if s.sy == 'NEWLINE':
+ s.next()
+ s.expect_indent()
+ if with_doc_only:
+ doc = p_doc_string(s)
+ body = p_statement_list(s, ctx)
+ s.expect_dedent()
+ else:
+ if ctx.api:
+ s.error("'api' not allowed with this statement", fatal=False)
+ if ctx.level in ('module', 'class', 'function', 'other'):
+ body = p_simple_statement_list(s, ctx)
+ else:
+ body = p_pass_statement(s)
+ s.expect_newline("Syntax error in declarations", ignore_semicolon=True)
+ if not with_doc_only:
+ doc, body = _extract_docstring(body)
+ return doc, body
+
+
+def p_positional_and_keyword_args(s, end_sy_set, templates = None):
+ """
+ Parses positional and keyword arguments. end_sy_set
+ should contain any s.sy that terminates the argument list.
+ Argument expansion (* and **) is not allowed.
+
+ Returns: (positional_args, keyword_args)
+ """
+ positional_args = []
+ keyword_args = []
+ pos_idx = 0
+
+ while s.sy not in end_sy_set:
+ if s.sy == '*' or s.sy == '**':
+ s.error('Argument expansion not allowed here.', fatal=False)
+
+ parsed_type = False
+ if s.sy == 'IDENT' and s.peek()[0] == '=':
+ ident = s.systring
+ s.next() # s.sy is '='
+ s.next()
+ if looking_at_expr(s):
+ arg = p_test(s)
+ else:
+ base_type = p_c_base_type(s, templates = templates)
+ declarator = p_c_declarator(s, empty = 1)
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ base_type = base_type, declarator = declarator)
+ parsed_type = True
+ keyword_node = ExprNodes.IdentifierStringNode(arg.pos, value=ident)
+ keyword_args.append((keyword_node, arg))
+ was_keyword = True
+
+ else:
+ if looking_at_expr(s):
+ arg = p_test(s)
+ else:
+ base_type = p_c_base_type(s, templates = templates)
+ declarator = p_c_declarator(s, empty = 1)
+ arg = Nodes.CComplexBaseTypeNode(base_type.pos,
+ base_type = base_type, declarator = declarator)
+ parsed_type = True
+ positional_args.append(arg)
+ pos_idx += 1
+ if len(keyword_args) > 0:
+ s.error("Non-keyword arg following keyword arg",
+ pos=arg.pos)
+
+ if s.sy != ',':
+ if s.sy not in end_sy_set:
+ if parsed_type:
+ s.error("Unmatched %s" % " or ".join(end_sy_set))
+ break
+ s.next()
+ return positional_args, keyword_args
+
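+# Illustrative note (editor's addition): this parses the bracket contents of
+# buffer/template types, so 'ndarray[double, ndim=2]' would come back roughly
+# as positional_args == [<double>] and
+# keyword_args == [(<IdentifierStringNode u'ndim'>, <2>)].
+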
+def p_c_base_type(s, self_flag = 0, nonempty = 0, templates = None):
+ # If self_flag is true, this is the base type for the
+ # self argument of a C method of an extension type.
+ if s.sy == '(':
+ return p_c_complex_base_type(s, templates = templates)
+ else:
+ return p_c_simple_base_type(s, self_flag, nonempty = nonempty, templates = templates)
+
+def p_calling_convention(s):
+ if s.sy == 'IDENT' and s.systring in calling_convention_words:
+ result = s.systring
+ s.next()
+ return result
+ else:
+ return ""
+
+
+calling_convention_words = cython.declare(
+ set, set(["__stdcall", "__cdecl", "__fastcall"]))
+
+
+def p_c_complex_base_type(s, templates = None):
+ # s.sy == '('
+ pos = s.position()
+ s.next()
+ base_type = p_c_base_type(s, templates=templates)
+ declarator = p_c_declarator(s, empty=True)
+ type_node = Nodes.CComplexBaseTypeNode(
+ pos, base_type=base_type, declarator=declarator)
+ if s.sy == ',':
+ components = [type_node]
+ while s.sy == ',':
+ s.next()
+ if s.sy == ')':
+ break
+ base_type = p_c_base_type(s, templates=templates)
+ declarator = p_c_declarator(s, empty=True)
+ components.append(Nodes.CComplexBaseTypeNode(
+ pos, base_type=base_type, declarator=declarator))
+ type_node = Nodes.CTupleBaseTypeNode(pos, components = components)
+
+ s.expect(')')
+ if s.sy == '[':
+ if is_memoryviewslice_access(s):
+ type_node = p_memoryviewslice_access(s, type_node)
+ else:
+ type_node = p_buffer_or_template(s, type_node, templates)
+ return type_node
+
+
+def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
+ #print "p_c_simple_base_type: self_flag =", self_flag, nonempty
+ is_basic = 0
+ signed = 1
+ longness = 0
+ complex = 0
+ module_path = []
+ pos = s.position()
+ if not s.sy == 'IDENT':
+ error(pos, "Expected an identifier, found '%s'" % s.sy)
+ if s.systring == 'const':
+ s.next()
+ base_type = p_c_base_type(s, self_flag=self_flag, nonempty=nonempty, templates=templates)
+ if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
+ # reverse order to avoid having to write "(const int)[:]"
+ base_type.base_type_node = Nodes.CConstTypeNode(pos, base_type=base_type.base_type_node)
+ return base_type
+ return Nodes.CConstTypeNode(pos, base_type=base_type)
+ if looking_at_base_type(s):
+ #print "p_c_simple_base_type: looking_at_base_type at", s.position()
+ is_basic = 1
+ if s.sy == 'IDENT' and s.systring in special_basic_c_types:
+ signed, longness = special_basic_c_types[s.systring]
+ name = s.systring
+ s.next()
+ else:
+ signed, longness = p_sign_and_longness(s)
+ if s.sy == 'IDENT' and s.systring in basic_c_type_names:
+ name = s.systring
+ s.next()
+ else:
+ name = 'int' # long [int], short [int], long [int] complex, etc.
+ if s.sy == 'IDENT' and s.systring == 'complex':
+ complex = 1
+ s.next()
+ elif looking_at_dotted_name(s):
+ #print "p_c_simple_base_type: looking_at_type_name at", s.position()
+ name = s.systring
+ s.next()
+ while s.sy == '.':
+ module_path.append(name)
+ s.next()
+ name = p_ident(s)
+ else:
+ name = s.systring
+ s.next()
+ if nonempty and s.sy != 'IDENT':
+ # Make sure this is not a declaration of a variable or function.
+ if s.sy == '(':
+ s.next()
+ if (s.sy == '*' or s.sy == '**' or s.sy == '&'
+ or (s.sy == 'IDENT' and s.systring in calling_convention_words)):
+ s.put_back('(', '(')
+ else:
+ s.put_back('(', '(')
+ s.put_back('IDENT', name)
+ name = None
+ elif s.sy not in ('*', '**', '[', '&'):
+ s.put_back('IDENT', name)
+ name = None
+
+ type_node = Nodes.CSimpleBaseTypeNode(pos,
+ name = name, module_path = module_path,
+ is_basic_c_type = is_basic, signed = signed,
+ complex = complex, longness = longness,
+ is_self_arg = self_flag, templates = templates)
+
+ # Handle trailing '[...]' (buffer/template/memoryview slice) declarations here.
+ if s.sy == '[':
+ if is_memoryviewslice_access(s):
+ type_node = p_memoryviewslice_access(s, type_node)
+ else:
+ type_node = p_buffer_or_template(s, type_node, templates)
+
+ if s.sy == '.':
+ s.next()
+ name = p_ident(s)
+ type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
+
+ return type_node
+
+def p_buffer_or_template(s, base_type_node, templates):
+ # s.sy == '['
+ pos = s.position()
+ s.next()
+ # Note that buffer_positional_options_count=1, so the only positional argument is dtype.
+ # For templated types, all parameters are types.
+ positional_args, keyword_args = (
+ p_positional_and_keyword_args(s, (']',), templates)
+ )
+ s.expect(']')
+
+ if s.sy == '[':
+ base_type_node = p_buffer_or_template(s, base_type_node, templates)
+
+ keyword_dict = ExprNodes.DictNode(pos,
+ key_value_pairs = [
+ ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)
+ for key, value in keyword_args
+ ])
+ result = Nodes.TemplatedTypeNode(pos,
+ positional_args = positional_args,
+ keyword_args = keyword_dict,
+ base_type_node = base_type_node)
+ return result
+
+def p_bracketed_base_type(s, base_type_node, nonempty, empty):
+ # s.sy == '['
+ if empty and not nonempty:
+ # sizeof-like thing. Only anonymous C arrays allowed (int[SIZE]).
+ return base_type_node
+ elif not empty and nonempty:
+ # declaration of either memoryview slice or buffer.
+ if is_memoryviewslice_access(s):
+ return p_memoryviewslice_access(s, base_type_node)
+ else:
+ return p_buffer_or_template(s, base_type_node, None)
+ # return p_buffer_access(s, base_type_node)
+ elif not empty and not nonempty:
+ # only anonymous C arrays and memoryview slice arrays here. We
+ # disallow buffer declarations for now, due to ambiguity with anonymous
+ # C arrays.
+ if is_memoryviewslice_access(s):
+ return p_memoryviewslice_access(s, base_type_node)
+ else:
+ return base_type_node
+
+def is_memoryviewslice_access(s):
+ # s.sy == '['
+ # a memoryview slice declaration is distinguishable from a buffer access
+ # declaration by the first entry in the bracketed list. The buffer will
+ # not have an unnested colon in the first entry; the memoryview slice will.
+ saved = [(s.sy, s.systring)]
+ s.next()
+ retval = False
+ if s.systring == ':':
+ retval = True
+ elif s.sy == 'INT':
+ saved.append((s.sy, s.systring))
+ s.next()
+ if s.sy == ':':
+ retval = True
+
+ for sv in saved[::-1]:
+ s.put_back(*sv)
+
+ return retval
+
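+# Illustrative note (editor's addition): after 'double[' the lookahead above
+# answers True for 'double[:]' or 'double[1:]' (an unnested colon in the first
+# entry marks a memoryview slice) and False for 'double[10]' or a buffer spec
+# like 'ndarray[double]'.
+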
+def p_memoryviewslice_access(s, base_type_node):
+ # s.sy == '['
+ pos = s.position()
+ s.next()
+ subscripts, _ = p_subscript_list(s)
+ # make sure each entry in subscripts is a slice
+ for subscript in subscripts:
+ if len(subscript) < 2:
+ s.error("An axis specification in memoryview declaration does not have a ':'.")
+ s.expect(']')
+ indexes = make_slice_nodes(pos, subscripts)
+ result = Nodes.MemoryViewSliceTypeNode(pos,
+ base_type_node = base_type_node,
+ axes = indexes)
+ return result
+
+def looking_at_name(s):
+ return s.sy == 'IDENT' and s.systring not in calling_convention_words
+
+def looking_at_expr(s):
+ if s.systring in base_type_start_words:
+ return False
+ elif s.sy == 'IDENT':
+ is_type = False
+ name = s.systring
+ dotted_path = []
+ s.next()
+
+ while s.sy == '.':
+ s.next()
+ dotted_path.append(s.systring)
+ s.expect('IDENT')
+
+ saved = s.sy, s.systring
+ if s.sy == 'IDENT':
+ is_type = True
+ elif s.sy == '*' or s.sy == '**':
+ s.next()
+ is_type = s.sy in (')', ']')
+ s.put_back(*saved)
+ elif s.sy == '(':
+ s.next()
+ is_type = s.sy == '*'
+ s.put_back(*saved)
+ elif s.sy == '[':
+ s.next()
+ is_type = s.sy == ']' or not looking_at_expr(s) # could be a nested template type
+ s.put_back(*saved)
+
+ dotted_path.reverse()
+ for p in dotted_path:
+ s.put_back('IDENT', p)
+ s.put_back('.', '.')
+
+ s.put_back('IDENT', name)
+ return not is_type and saved[0]
+ else:
+ return True
+
+def looking_at_base_type(s):
+ #print "looking_at_base_type?", s.sy, s.systring, s.position()
+ return s.sy == 'IDENT' and s.systring in base_type_start_words
+
+def looking_at_dotted_name(s):
+ if s.sy == 'IDENT':
+ name = s.systring
+ s.next()
+ result = s.sy == '.'
+ s.put_back('IDENT', name)
+ return result
+ else:
+ return 0
+
+def looking_at_call(s):
+ "See if we're looking at a.b.c("
+ # Don't mess up the original position, so save and restore it.
+ # Unfortunately there's no good way to handle this, as a subsequent call
+ # to next() will not advance the position until it reads a new token.
+ position = s.start_line, s.start_col
+ result = looking_at_expr(s) == u'('
+ if not result:
+ s.start_line, s.start_col = position
+ return result
+
+basic_c_type_names = cython.declare(
+ set, set(["void", "char", "int", "float", "double", "bint"]))
+
+special_basic_c_types = cython.declare(dict, {
+ # name : (signed, longness)
+ "Py_UNICODE" : (0, 0),
+ "Py_UCS4" : (0, 0),
+ "Py_hash_t" : (2, 0),
+ "Py_ssize_t" : (2, 0),
+ "ssize_t" : (2, 0),
+ "size_t" : (0, 0),
+ "ptrdiff_t" : (2, 0),
+ "Py_tss_t" : (1, 0),
+})
+
+sign_and_longness_words = cython.declare(
+ set, set(["short", "long", "signed", "unsigned"]))
+
+base_type_start_words = cython.declare(
+ set,
+ basic_c_type_names
+ | sign_and_longness_words
+ | set(special_basic_c_types))
+
+struct_enum_union = cython.declare(
+ set, set(["struct", "union", "enum", "packed"]))
+
+def p_sign_and_longness(s):
+ signed = 1
+ longness = 0
+ while s.sy == 'IDENT' and s.systring in sign_and_longness_words:
+ if s.systring == 'unsigned':
+ signed = 0
+ elif s.systring == 'signed':
+ signed = 2
+ elif s.systring == 'short':
+ longness = -1
+ elif s.systring == 'long':
+ longness += 1
+ s.next()
+ return signed, longness
+
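+# Illustrative note (editor's addition): 'unsigned long long' scans to
+# (signed=0, longness=2) and 'short' to (signed=1, longness=-1); the caller
+# later defaults the missing base type name to 'int'.
+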
+def p_opt_cname(s):
+ literal = p_opt_string_literal(s, 'u')
+ if literal is not None:
+ cname = EncodedString(literal)
+ cname.encoding = s.source_encoding
+ else:
+ cname = None
+ return cname
+
+def p_c_declarator(s, ctx = Ctx(), empty = 0, is_type = 0, cmethod_flag = 0,
+ assignable = 0, nonempty = 0,
+ calling_convention_allowed = 0):
+ # If empty is true, the declarator must be empty. If nonempty is true,
+ # the declarator must be nonempty. Otherwise we don't care.
+ # If cmethod_flag is true, then if this declarator declares
+ # a function, it's a C method of an extension type.
+ pos = s.position()
+ if s.sy == '(':
+ s.next()
+ if s.sy == ')' or looking_at_name(s):
+ base = Nodes.CNameDeclaratorNode(pos, name=s.context.intern_ustring(u""), cname=None)
+ result = p_c_func_declarator(s, pos, ctx, base, cmethod_flag)
+ else:
+ result = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
+ cmethod_flag = cmethod_flag,
+ nonempty = nonempty,
+ calling_convention_allowed = 1)
+ s.expect(')')
+ else:
+ result = p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
+ assignable, nonempty)
+ if not calling_convention_allowed and result.calling_convention and s.sy != '(':
+ error(s.position(), "%s on something that is not a function"
+ % result.calling_convention)
+ while s.sy in ('[', '('):
+ pos = s.position()
+ if s.sy == '[':
+ result = p_c_array_declarator(s, result)
+ else: # sy == '('
+ s.next()
+ result = p_c_func_declarator(s, pos, ctx, result, cmethod_flag)
+ cmethod_flag = 0
+ return result
+
+def p_c_array_declarator(s, base):
+ pos = s.position()
+ s.next() # '['
+ if s.sy != ']':
+ dim = p_testlist(s)
+ else:
+ dim = None
+ s.expect(']')
+ return Nodes.CArrayDeclaratorNode(pos, base = base, dimension = dim)
+
+def p_c_func_declarator(s, pos, ctx, base, cmethod_flag):
+ # Opening paren has already been skipped
+ args = p_c_arg_list(s, ctx, cmethod_flag = cmethod_flag,
+ nonempty_declarators = 0)
+ ellipsis = p_optional_ellipsis(s)
+ s.expect(')')
+ nogil = p_nogil(s)
+ exc_val, exc_check = p_exception_value_clause(s)
+ # TODO - warning to enforce preferred exception specification order
+ nogil = nogil or p_nogil(s)
+ with_gil = p_with_gil(s)
+ return Nodes.CFuncDeclaratorNode(pos,
+ base = base, args = args, has_varargs = ellipsis,
+ exception_value = exc_val, exception_check = exc_check,
+ nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil)
+
+supported_overloaded_operators = cython.declare(set, set([
+ '+', '-', '*', '/', '%',
+ '++', '--', '~', '|', '&', '^', '<<', '>>', ',',
+ '==', '!=', '>=', '>', '<=', '<',
+ '[]', '()', '!', '=',
+ 'bool',
+]))
+
+def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
+ assignable, nonempty):
+ pos = s.position()
+ calling_convention = p_calling_convention(s)
+ if s.sy == '*':
+ s.next()
+ if s.systring == 'const':
+ const_pos = s.position()
+ s.next()
+ const_base = p_c_declarator(s, ctx, empty = empty,
+ is_type = is_type,
+ cmethod_flag = cmethod_flag,
+ assignable = assignable,
+ nonempty = nonempty)
+ base = Nodes.CConstDeclaratorNode(const_pos, base = const_base)
+ else:
+ base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
+ cmethod_flag = cmethod_flag,
+ assignable = assignable, nonempty = nonempty)
+ result = Nodes.CPtrDeclaratorNode(pos,
+ base = base)
+ elif s.sy == '**': # scanner returns this as a single token
+ s.next()
+ base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
+ cmethod_flag = cmethod_flag,
+ assignable = assignable, nonempty = nonempty)
+ result = Nodes.CPtrDeclaratorNode(pos,
+ base = Nodes.CPtrDeclaratorNode(pos,
+ base = base))
+ elif s.sy == '&':
+ s.next()
+ base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
+ cmethod_flag = cmethod_flag,
+ assignable = assignable, nonempty = nonempty)
+ result = Nodes.CReferenceDeclaratorNode(pos, base = base)
+ else:
+ rhs = None
+ if s.sy == 'IDENT':
+ name = s.systring
+ if empty:
+ error(s.position(), "Declarator should be empty")
+ s.next()
+ cname = p_opt_cname(s)
+ if name != 'operator' and s.sy == '=' and assignable:
+ s.next()
+ rhs = p_test(s)
+ else:
+ if nonempty:
+ error(s.position(), "Empty declarator")
+ name = ""
+ cname = None
+ if cname is None and ctx.namespace is not None and nonempty:
+ cname = ctx.namespace + "::" + name
+ if name == 'operator' and ctx.visibility == 'extern' and nonempty:
+ op = s.sy
+ if any(c in '+-*/<=>!%&|([^~,' for c in op):
+ s.next()
+ # Handle diphthong operators.
+ if op == '(':
+ s.expect(')')
+ op = '()'
+ elif op == '[':
+ s.expect(']')
+ op = '[]'
+ elif op in ('-', '+', '|', '&') and s.sy == op:
+ op *= 2 # ++, --, ...
+ s.next()
+ elif s.sy == '=':
+ op += s.sy # +=, -=, ...
+ s.next()
+ if op not in supported_overloaded_operators:
+ s.error("Overloading operator '%s' not yet supported." % op,
+ fatal=False)
+ name += op
+ elif op == 'IDENT':
+ op = s.systring
+ if op not in supported_overloaded_operators:
+ s.error("Overloading operator '%s' not yet supported." % op,
+ fatal=False)
+ name = name + ' ' + op
+ s.next()
+ result = Nodes.CNameDeclaratorNode(pos,
+ name = name, cname = cname, default = rhs)
+ result.calling_convention = calling_convention
+ return result
+
+def p_nogil(s):
+ if s.sy == 'IDENT' and s.systring == 'nogil':
+ s.next()
+ return 1
+ else:
+ return 0
+
+def p_with_gil(s):
+ if s.sy == 'with':
+ s.next()
+ s.expect_keyword('gil')
+ return 1
+ else:
+ return 0
+
+def p_exception_value_clause(s):
+ exc_val = None
+ exc_check = 0
+
+ if s.sy == 'IDENT' and s.systring == 'noexcept':
+ s.next()
+ exc_check = False # No-op in Cython 0.29.x
+ elif s.sy == 'except':
+ s.next()
+ if s.sy == '*':
+ exc_check = 1
+ s.next()
+ elif s.sy == '+':
+ exc_check = '+'
+ s.next()
+ if s.sy == 'IDENT':
+ name = s.systring
+ s.next()
+ exc_val = p_name(s, name)
+ elif s.sy == '*':
+ exc_val = ExprNodes.CharNode(s.position(), value=u'*')
+ s.next()
+ else:
+ if s.sy == '?':
+ exc_check = 1
+ s.next()
+ exc_val = p_test(s)
+ return exc_val, exc_check
+
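+# Illustrative note (editor's addition): 'except -1' would return (<-1>, 0),
+# 'except? -1' gives (<-1>, 1), 'except *' gives (None, 1), 'except +' gives
+# (None, '+'), and bare 'noexcept' leaves (None, False) -- a no-op in this
+# Cython series, as noted above.
+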
+c_arg_list_terminators = cython.declare(set, set(['*', '**', '.', ')', ':']))
+
+def p_c_arg_list(s, ctx = Ctx(), in_pyfunc = 0, cmethod_flag = 0,
+ nonempty_declarators = 0, kw_only = 0, annotated = 1):
+ # Comma-separated list of C argument declarations, possibly empty.
+ # May have a trailing comma.
+ args = []
+ is_self_arg = cmethod_flag
+ while s.sy not in c_arg_list_terminators:
+ args.append(p_c_arg_decl(s, ctx, in_pyfunc, is_self_arg,
+ nonempty = nonempty_declarators, kw_only = kw_only,
+ annotated = annotated))
+ if s.sy != ',':
+ break
+ s.next()
+ is_self_arg = 0
+ return args
+
+def p_optional_ellipsis(s):
+ if s.sy == '.':
+ expect_ellipsis(s)
+ return 1
+ else:
+ return 0
+
+def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0,
+ kw_only = 0, annotated = 1):
+ pos = s.position()
+ not_none = or_none = 0
+ default = None
+ annotation = None
+ if s.in_python_file:
+ # empty type declaration
+ base_type = Nodes.CSimpleBaseTypeNode(pos,
+ name = None, module_path = [],
+ is_basic_c_type = 0, signed = 0,
+ complex = 0, longness = 0,
+ is_self_arg = cmethod_flag, templates = None)
+ else:
+ base_type = p_c_base_type(s, cmethod_flag, nonempty = nonempty)
+ declarator = p_c_declarator(s, ctx, nonempty = nonempty)
+ if s.sy in ('not', 'or') and not s.in_python_file:
+ kind = s.sy
+ s.next()
+ if s.sy == 'IDENT' and s.systring == 'None':
+ s.next()
+ else:
+ s.error("Expected 'None'")
+ if not in_pyfunc:
+ error(pos, "'%s None' only allowed in Python functions" % kind)
+ or_none = kind == 'or'
+ not_none = kind == 'not'
+ if annotated and s.sy == ':':
+ s.next()
+ annotation = p_test(s)
+ if s.sy == '=':
+ s.next()
+ if 'pxd' in ctx.level:
+ if s.sy in ['*', '?']:
+ # TODO(github/1736): Make this an error for inline declarations.
+ default = ExprNodes.NoneNode(pos)
+ s.next()
+ elif 'inline' in ctx.modifiers:
+ default = p_test(s)
+ else:
+ error(pos, "default values cannot be specified in pxd files, use ? or *")
+ else:
+ default = p_test(s)
+ return Nodes.CArgDeclNode(pos,
+ base_type = base_type,
+ declarator = declarator,
+ not_none = not_none,
+ or_none = or_none,
+ default = default,
+ annotation = annotation,
+ kw_only = kw_only)
+
+def p_api(s):
+ if s.sy == 'IDENT' and s.systring == 'api':
+ s.next()
+ return 1
+ else:
+ return 0
+
+def p_cdef_statement(s, ctx):
+ pos = s.position()
+ ctx.visibility = p_visibility(s, ctx.visibility)
+ ctx.api = ctx.api or p_api(s)
+ if ctx.api:
+ if ctx.visibility not in ('private', 'public'):
+ error(pos, "Cannot combine 'api' with '%s'" % ctx.visibility)
+ if (ctx.visibility == 'extern') and s.sy == 'from':
+ return p_cdef_extern_block(s, pos, ctx)
+ elif s.sy == 'import':
+ s.next()
+ return p_cdef_extern_block(s, pos, ctx)
+ elif p_nogil(s):
+ ctx.nogil = 1
+ if ctx.overridable:
+ error(pos, "cdef blocks cannot be declared cpdef")
+ return p_cdef_block(s, ctx)
+ elif s.sy == ':':
+ if ctx.overridable:
+ error(pos, "cdef blocks cannot be declared cpdef")
+ return p_cdef_block(s, ctx)
+ elif s.sy == 'class':
+ if ctx.level not in ('module', 'module_pxd'):
+ error(pos, "Extension type definition not allowed here")
+ if ctx.overridable:
+ error(pos, "Extension types cannot be declared cpdef")
+ return p_c_class_definition(s, pos, ctx)
+ elif s.sy == 'IDENT' and s.systring == 'cppclass':
+ return p_cpp_class_definition(s, pos, ctx)
+ elif s.sy == 'IDENT' and s.systring in struct_enum_union:
+ if ctx.level not in ('module', 'module_pxd'):
+ error(pos, "C struct/union/enum definition not allowed here")
+ if ctx.overridable:
+ if s.systring != 'enum':
+ error(pos, "C struct/union cannot be declared cpdef")
+ return p_struct_enum(s, pos, ctx)
+ elif s.sy == 'IDENT' and s.systring == 'fused':
+ return p_fused_definition(s, pos, ctx)
+ else:
+ return p_c_func_or_var_declaration(s, pos, ctx)
+
+def p_cdef_block(s, ctx):
+ return p_suite(s, ctx(cdef_flag = 1))
+
+def p_cdef_extern_block(s, pos, ctx):
+ if ctx.overridable:
+ error(pos, "cdef extern blocks cannot be declared cpdef")
+ include_file = None
+ s.expect('from')
+ if s.sy == '*':
+ s.next()
+ else:
+ include_file = p_string_literal(s, 'u')[2]
+ ctx = ctx(cdef_flag = 1, visibility = 'extern')
+ if s.systring == "namespace":
+ s.next()
+ ctx.namespace = p_string_literal(s, 'u')[2]
+ if p_nogil(s):
+ ctx.nogil = 1
+
+ # Use "docstring" as verbatim string to include
+ verbatim_include, body = p_suite_with_docstring(s, ctx, True)
+
+ return Nodes.CDefExternNode(pos,
+ include_file = include_file,
+ verbatim_include = verbatim_include,
+ body = body,
+ namespace = ctx.namespace)
+
+def p_c_enum_definition(s, pos, ctx):
+ # s.sy == ident 'enum'
+ s.next()
+ if s.sy == 'IDENT':
+ name = s.systring
+ s.next()
+ cname = p_opt_cname(s)
+ if cname is None and ctx.namespace is not None:
+ cname = ctx.namespace + "::" + name
+ else:
+ name = None
+ cname = None
+ items = None
+ s.expect(':')
+ items = []
+ if s.sy != 'NEWLINE':
+ p_c_enum_line(s, ctx, items)
+ else:
+ s.next() # 'NEWLINE'
+ s.expect_indent()
+ while s.sy not in ('DEDENT', 'EOF'):
+ p_c_enum_line(s, ctx, items)
+ s.expect_dedent()
+ return Nodes.CEnumDefNode(
+ pos, name = name, cname = cname, items = items,
+ typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
+ create_wrapper = ctx.overridable,
+ api = ctx.api, in_pxd = ctx.level == 'module_pxd')
+
+def p_c_enum_line(s, ctx, items):
+ if s.sy != 'pass':
+ p_c_enum_item(s, ctx, items)
+ while s.sy == ',':
+ s.next()
+ if s.sy in ('NEWLINE', 'EOF'):
+ break
+ p_c_enum_item(s, ctx, items)
+ else:
+ s.next()
+ s.expect_newline("Syntax error in enum item list")
+
+def p_c_enum_item(s, ctx, items):
+ pos = s.position()
+ name = p_ident(s)
+ cname = p_opt_cname(s)
+ if cname is None and ctx.namespace is not None:
+ cname = ctx.namespace + "::" + name
+ value = None
+ if s.sy == '=':
+ s.next()
+ value = p_test(s)
+ items.append(Nodes.CEnumDefItemNode(pos,
+ name = name, cname = cname, value = value))
+
+def p_c_struct_or_union_definition(s, pos, ctx):
+ packed = False
+ if s.systring == 'packed':
+ packed = True
+ s.next()
+        if s.sy != 'IDENT' or s.systring != 'struct':
+            s.expected('struct')
+ # s.sy == ident 'struct' or 'union'
+ kind = s.systring
+ s.next()
+ name = p_ident(s)
+ cname = p_opt_cname(s)
+ if cname is None and ctx.namespace is not None:
+ cname = ctx.namespace + "::" + name
+ attributes = None
+ if s.sy == ':':
+ s.next()
+ s.expect('NEWLINE')
+ s.expect_indent()
+ attributes = []
+ body_ctx = Ctx()
+ while s.sy != 'DEDENT':
+ if s.sy != 'pass':
+ attributes.append(
+ p_c_func_or_var_declaration(s, s.position(), body_ctx))
+ else:
+ s.next()
+ s.expect_newline("Expected a newline")
+ s.expect_dedent()
+ else:
+ s.expect_newline("Syntax error in struct or union definition")
+ return Nodes.CStructOrUnionDefNode(pos,
+ name = name, cname = cname, kind = kind, attributes = attributes,
+ typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
+ api = ctx.api, in_pxd = ctx.level == 'module_pxd', packed = packed)
+
+def p_fused_definition(s, pos, ctx):
+ """
+ c(type)def fused my_fused_type:
+ ...
+ """
+ # s.systring == 'fused'
+
+ if ctx.level not in ('module', 'module_pxd'):
+ error(pos, "Fused type definition not allowed here")
+
+ s.next()
+ name = p_ident(s)
+
+ s.expect(":")
+ s.expect_newline()
+ s.expect_indent()
+
+ types = []
+ while s.sy != 'DEDENT':
+ if s.sy != 'pass':
+ #types.append(p_c_declarator(s))
+ types.append(p_c_base_type(s)) #, nonempty=1))
+ else:
+ s.next()
+
+ s.expect_newline()
+
+ s.expect_dedent()
+
+ if not types:
+ error(pos, "Need at least one type")
+
+ return Nodes.FusedTypeNode(pos, name=name, types=types)
+
+def p_struct_enum(s, pos, ctx):
+ if s.systring == 'enum':
+ return p_c_enum_definition(s, pos, ctx)
+ else:
+ return p_c_struct_or_union_definition(s, pos, ctx)
+
+def p_visibility(s, prev_visibility):
+ pos = s.position()
+ visibility = prev_visibility
+ if s.sy == 'IDENT' and s.systring in ('extern', 'public', 'readonly'):
+ visibility = s.systring
+ if prev_visibility != 'private' and visibility != prev_visibility:
+ s.error("Conflicting visibility options '%s' and '%s'"
+ % (prev_visibility, visibility), fatal=False)
+ s.next()
+ return visibility
+
+def p_c_modifiers(s):
+ if s.sy == 'IDENT' and s.systring in ('inline',):
+ modifier = s.systring
+ s.next()
+ return [modifier] + p_c_modifiers(s)
+ return []
+
+def p_c_func_or_var_declaration(s, pos, ctx):
+ cmethod_flag = ctx.level in ('c_class', 'c_class_pxd')
+ modifiers = p_c_modifiers(s)
+ base_type = p_c_base_type(s, nonempty = 1, templates = ctx.templates)
+ declarator = p_c_declarator(s, ctx(modifiers=modifiers), cmethod_flag = cmethod_flag,
+ assignable = 1, nonempty = 1)
+ declarator.overridable = ctx.overridable
+ if s.sy == 'IDENT' and s.systring == 'const' and ctx.level == 'cpp_class':
+ s.next()
+ is_const_method = 1
+ else:
+ is_const_method = 0
+ if s.sy == '->':
+ # Special enough to give a better error message and keep going.
+ s.error(
+ "Return type annotation is not allowed in cdef/cpdef signatures. "
+ "Please define it before the function name, as in C signatures.",
+ fatal=False)
+ s.next()
+ p_test(s) # Keep going, but ignore result.
+ if s.sy == ':':
+ if ctx.level not in ('module', 'c_class', 'module_pxd', 'c_class_pxd', 'cpp_class') and not ctx.templates:
+ s.error("C function definition not allowed here")
+ doc, suite = p_suite_with_docstring(s, Ctx(level='function'))
+ result = Nodes.CFuncDefNode(pos,
+ visibility = ctx.visibility,
+ base_type = base_type,
+ declarator = declarator,
+ body = suite,
+ doc = doc,
+ modifiers = modifiers,
+ api = ctx.api,
+ overridable = ctx.overridable,
+ is_const_method = is_const_method)
+ else:
+ #if api:
+ # s.error("'api' not allowed with variable declaration")
+ if is_const_method:
+ declarator.is_const_method = is_const_method
+ declarators = [declarator]
+ while s.sy == ',':
+ s.next()
+ if s.sy == 'NEWLINE':
+ break
+ declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag,
+ assignable = 1, nonempty = 1)
+ declarators.append(declarator)
+ doc_line = s.start_line + 1
+ s.expect_newline("Syntax error in C variable declaration", ignore_semicolon=True)
+ if ctx.level in ('c_class', 'c_class_pxd') and s.start_line == doc_line:
+ doc = p_doc_string(s)
+ else:
+ doc = None
+ result = Nodes.CVarDefNode(pos,
+ visibility = ctx.visibility,
+ base_type = base_type,
+ declarators = declarators,
+ in_pxd = ctx.level in ('module_pxd', 'c_class_pxd'),
+ doc = doc,
+ api = ctx.api,
+ modifiers = modifiers,
+ overridable = ctx.overridable)
+ return result
+
+def p_ctypedef_statement(s, ctx):
+ # s.sy == 'ctypedef'
+ pos = s.position()
+ s.next()
+ visibility = p_visibility(s, ctx.visibility)
+ api = p_api(s)
+ ctx = ctx(typedef_flag = 1, visibility = visibility)
+ if api:
+ ctx.api = 1
+ if s.sy == 'class':
+ return p_c_class_definition(s, pos, ctx)
+ elif s.sy == 'IDENT' and s.systring in struct_enum_union:
+ return p_struct_enum(s, pos, ctx)
+ elif s.sy == 'IDENT' and s.systring == 'fused':
+ return p_fused_definition(s, pos, ctx)
+ else:
+ base_type = p_c_base_type(s, nonempty = 1)
+ declarator = p_c_declarator(s, ctx, is_type = 1, nonempty = 1)
+ s.expect_newline("Syntax error in ctypedef statement", ignore_semicolon=True)
+ return Nodes.CTypeDefNode(
+ pos, base_type = base_type,
+ declarator = declarator,
+ visibility = visibility, api = api,
+ in_pxd = ctx.level == 'module_pxd')
+
+def p_decorators(s):
+ decorators = []
+ while s.sy == '@':
+ pos = s.position()
+ s.next()
+ decstring = p_dotted_name(s, as_allowed=0)[2]
+ names = decstring.split('.')
+ decorator = ExprNodes.NameNode(pos, name=s.context.intern_ustring(names[0]))
+ for name in names[1:]:
+ decorator = ExprNodes.AttributeNode(
+ pos, attribute=s.context.intern_ustring(name), obj=decorator)
+ if s.sy == '(':
+ decorator = p_call(s, decorator)
+ decorators.append(Nodes.DecoratorNode(pos, decorator=decorator))
+ s.expect_newline("Expected a newline after decorator")
+ return decorators
+
+
+def _reject_cdef_modifier_in_py(s, name):
+ """Step over incorrectly placed cdef modifiers (@see _CDEF_MODIFIERS) to provide a good error message for them.
+ """
+ if s.sy == 'IDENT' and name in _CDEF_MODIFIERS:
+ # Special enough to provide a good error message.
+ s.error("Cannot use cdef modifier '%s' in Python function signature. Use a decorator instead." % name, fatal=False)
+ return p_ident(s) # Keep going, in case there are other errors.
+ return name
+
+
+def p_def_statement(s, decorators=None, is_async_def=False):
+ # s.sy == 'def'
+ pos = s.position()
+ # PEP 492 switches the async/await keywords on in "async def" functions
+ if is_async_def:
+ s.enter_async()
+ s.next()
+ name = _reject_cdef_modifier_in_py(s, p_ident(s))
+ s.expect(
+ '(',
+ "Expected '(', found '%s'. Did you use cdef syntax in a Python declaration? "
+ "Use decorators and Python type annotations instead." % (
+ s.systring if s.sy == 'IDENT' else s.sy))
+ args, star_arg, starstar_arg = p_varargslist(s, terminator=')')
+ s.expect(')')
+ _reject_cdef_modifier_in_py(s, s.systring)
+ return_type_annotation = None
+ if s.sy == '->':
+ s.next()
+ return_type_annotation = p_test(s)
+ _reject_cdef_modifier_in_py(s, s.systring)
+
+ doc, body = p_suite_with_docstring(s, Ctx(level='function'))
+ if is_async_def:
+ s.exit_async()
+
+ return Nodes.DefNode(
+ pos, name=name, args=args, star_arg=star_arg, starstar_arg=starstar_arg,
+ doc=doc, body=body, decorators=decorators, is_async_def=is_async_def,
+ return_type_annotation=return_type_annotation)
+
+
+def p_varargslist(s, terminator=')', annotated=1):
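+    # Parses a Python-style argument list, e.g. "a, b=1, *args, c=2, **kwargs",
+    # stopping at the given terminator.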
+ args = p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1,
+ annotated = annotated)
+ star_arg = None
+ starstar_arg = None
+ if s.sy == '*':
+ s.next()
+ if s.sy == 'IDENT':
+ star_arg = p_py_arg_decl(s, annotated=annotated)
+ if s.sy == ',':
+ s.next()
+ args.extend(p_c_arg_list(s, in_pyfunc = 1,
+ nonempty_declarators = 1, kw_only = 1, annotated = annotated))
+ elif s.sy != terminator:
+ s.error("Syntax error in Python function argument list")
+ if s.sy == '**':
+ s.next()
+ starstar_arg = p_py_arg_decl(s, annotated=annotated)
+ if s.sy == ',':
+ s.next()
+ return (args, star_arg, starstar_arg)
+
+def p_py_arg_decl(s, annotated = 1):
+ pos = s.position()
+ name = p_ident(s)
+ annotation = None
+ if annotated and s.sy == ':':
+ s.next()
+ annotation = p_test(s)
+ return Nodes.PyArgDeclNode(pos, name = name, annotation = annotation)
+
+
+def p_class_statement(s, decorators):
+ # s.sy == 'class'
+ pos = s.position()
+ s.next()
+ class_name = EncodedString(p_ident(s))
+ class_name.encoding = s.source_encoding # FIXME: why is this needed?
+ arg_tuple = None
+ keyword_dict = None
+ if s.sy == '(':
+ positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
+ arg_tuple, keyword_dict = p_call_build_packed_args(pos, positional_args, keyword_args)
+ if arg_tuple is None:
+ # XXX: empty arg_tuple
+ arg_tuple = ExprNodes.TupleNode(pos, args=[])
+ doc, body = p_suite_with_docstring(s, Ctx(level='class'))
+ return Nodes.PyClassDefNode(
+ pos, name=class_name,
+ bases=arg_tuple,
+ keyword_args=keyword_dict,
+ doc=doc, body=body, decorators=decorators,
+ force_py3_semantics=s.context.language_level >= 3)
+
+
+def p_c_class_definition(s, pos, ctx):
+ # s.sy == 'class'
+ s.next()
+ module_path = []
+ class_name = p_ident(s)
+ while s.sy == '.':
+ s.next()
+ module_path.append(class_name)
+ class_name = p_ident(s)
+ if module_path and ctx.visibility != 'extern':
+ error(pos, "Qualified class name only allowed for 'extern' C class")
+ if module_path and s.sy == 'IDENT' and s.systring == 'as':
+ s.next()
+ as_name = p_ident(s)
+ else:
+ as_name = class_name
+ objstruct_name = None
+ typeobj_name = None
+ bases = None
+ check_size = None
+ if s.sy == '(':
+ positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
+ if keyword_args:
+ s.error("C classes cannot take keyword bases.")
+ bases, _ = p_call_build_packed_args(pos, positional_args, keyword_args)
+ if bases is None:
+ bases = ExprNodes.TupleNode(pos, args=[])
+
+ if s.sy == '[':
+ if ctx.visibility not in ('public', 'extern') and not ctx.api:
+ error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
+ objstruct_name, typeobj_name, check_size = p_c_class_options(s)
+ if s.sy == ':':
+ if ctx.level == 'module_pxd':
+ body_level = 'c_class_pxd'
+ else:
+ body_level = 'c_class'
+ doc, body = p_suite_with_docstring(s, Ctx(level=body_level))
+ else:
+ s.expect_newline("Syntax error in C class definition")
+ doc = None
+ body = None
+ if ctx.visibility == 'extern':
+ if not module_path:
+ error(pos, "Module name required for 'extern' C class")
+ if typeobj_name:
+ error(pos, "Type object name specification not allowed for 'extern' C class")
+ elif ctx.visibility == 'public':
+ if not objstruct_name:
+ error(pos, "Object struct name specification required for 'public' C class")
+ if not typeobj_name:
+ error(pos, "Type object name specification required for 'public' C class")
+ elif ctx.visibility == 'private':
+ if ctx.api:
+ if not objstruct_name:
+ error(pos, "Object struct name specification required for 'api' C class")
+ if not typeobj_name:
+ error(pos, "Type object name specification required for 'api' C class")
+ else:
+ error(pos, "Invalid class visibility '%s'" % ctx.visibility)
+ return Nodes.CClassDefNode(pos,
+ visibility = ctx.visibility,
+ typedef_flag = ctx.typedef_flag,
+ api = ctx.api,
+ module_name = ".".join(module_path),
+ class_name = class_name,
+ as_name = as_name,
+ bases = bases,
+ objstruct_name = objstruct_name,
+ typeobj_name = typeobj_name,
+ check_size = check_size,
+ in_pxd = ctx.level == 'module_pxd',
+ doc = doc,
+ body = body)
+
+
+def p_c_class_options(s):
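+    # Parses a bracketed name-options list, e.g.
+    # "[object PyArrayObject, type PyArray_Type, check_size ignore]"
+    # (the names here are illustrative).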
+ objstruct_name = None
+ typeobj_name = None
+ check_size = None
+ s.expect('[')
+ while 1:
+ if s.sy != 'IDENT':
+ break
+ if s.systring == 'object':
+ s.next()
+ objstruct_name = p_ident(s)
+ elif s.systring == 'type':
+ s.next()
+ typeobj_name = p_ident(s)
+ elif s.systring == 'check_size':
+ s.next()
+ check_size = p_ident(s)
+ if check_size not in ('ignore', 'warn', 'error'):
+ s.error("Expected one of ignore, warn or error, found %r" % check_size)
+ if s.sy != ',':
+ break
+ s.next()
+ s.expect(']', "Expected 'object', 'type' or 'check_size'")
+ return objstruct_name, typeobj_name, check_size
+
+
+def p_property_decl(s):
+ pos = s.position()
+ s.next() # 'property'
+ name = p_ident(s)
+ doc, body = p_suite_with_docstring(
+ s, Ctx(level='property'), with_doc_only=True)
+ return Nodes.PropertyNode(pos, name=name, doc=doc, body=body)
+
+
+def p_ignorable_statement(s):
+ """
+ Parses any kind of ignorable statement that is allowed in .pxd files.
+ """
+ if s.sy == 'BEGIN_STRING':
+ pos = s.position()
+ string_node = p_atom(s)
+ s.expect_newline("Syntax error in string", ignore_semicolon=True)
+ return Nodes.ExprStatNode(pos, expr=string_node)
+ return None
+
+
+def p_doc_string(s):
+ if s.sy == 'BEGIN_STRING':
+ pos = s.position()
+ kind, bytes_result, unicode_result = p_cat_string_literal(s)
+ s.expect_newline("Syntax error in doc string", ignore_semicolon=True)
+ if kind in ('u', ''):
+ return unicode_result
+ warning(pos, "Python 3 requires docstrings to be unicode strings")
+ return bytes_result
+ else:
+ return None
+
+
+def _extract_docstring(node):
+ """
+ Extract a docstring from a statement or from the first statement
+ in a list. Remove the statement if found. Return a tuple
+ (plain-docstring or None, node).
+ """
+ doc_node = None
+ if node is None:
+ pass
+ elif isinstance(node, Nodes.ExprStatNode):
+ if node.expr.is_string_literal:
+ doc_node = node.expr
+ node = Nodes.StatListNode(node.pos, stats=[])
+ elif isinstance(node, Nodes.StatListNode) and node.stats:
+ stats = node.stats
+ if isinstance(stats[0], Nodes.ExprStatNode):
+ if stats[0].expr.is_string_literal:
+ doc_node = stats[0].expr
+ del stats[0]
+
+ if doc_node is None:
+ doc = None
+ elif isinstance(doc_node, ExprNodes.BytesNode):
+ warning(node.pos,
+ "Python 3 requires docstrings to be unicode strings")
+ doc = doc_node.value
+ elif isinstance(doc_node, ExprNodes.StringNode):
+ doc = doc_node.unicode_value
+ if doc is None:
+ doc = doc_node.value
+ else:
+ doc = doc_node.value
+ return doc, node
+
+
+def p_code(s, level=None, ctx=Ctx):
+ body = p_statement_list(s, ctx(level = level), first_statement = 1)
+ if s.sy != 'EOF':
+ s.error("Syntax error in statement [%s,%s]" % (
+ repr(s.sy), repr(s.systring)))
+ return body
+
+
+_match_compiler_directive_comment = cython.declare(object, re.compile(
+ r"^#\s*cython\s*:\s*((\w|[.])+\s*=.*)$").match)
+
+
+def p_compiler_directive_comments(s):
+ result = {}
+ while s.sy == 'commentline':
+ pos = s.position()
+ m = _match_compiler_directive_comment(s.systring)
+ if m:
+ directives_string = m.group(1).strip()
+ try:
+ new_directives = Options.parse_directive_list(directives_string, ignore_unknown=True)
+ except ValueError as e:
+ s.error(e.args[0], fatal=False)
+ s.next()
+ continue
+
+ for name in new_directives:
+ if name not in result:
+ pass
+ elif new_directives[name] == result[name]:
+ warning(pos, "Duplicate directive found: %s" % (name,))
+ else:
+ s.error("Conflicting settings found for top-level directive %s: %r and %r" % (
+ name, result[name], new_directives[name]), pos=pos)
+
+ if 'language_level' in new_directives:
+ # Make sure we apply the language level already to the first token that follows the comments.
+ s.context.set_language_level(new_directives['language_level'])
+
+ result.update(new_directives)
+
+ s.next()
+ return result
+
+
+def p_module(s, pxd, full_module_name, ctx=Ctx):
+ pos = s.position()
+
+ directive_comments = p_compiler_directive_comments(s)
+ s.parse_comments = False
+
+ if s.context.language_level is None:
+ s.context.set_language_level(2) # Arcadia default.
+
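+    # The upstream fallback below is unreachable here: the Arcadia default
+    # above has already set a language level.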
+ if s.context.language_level is None:
+ s.context.set_language_level(2)
+ if pos[0].filename:
+ import warnings
+ warnings.warn(
+ "Cython directive 'language_level' not set, using 2 for now (Py2). "
+ "This will change in a later release! File: %s" % pos[0].filename,
+ FutureWarning,
+ stacklevel=1 if cython.compiled else 2,
+ )
+
+ doc = p_doc_string(s)
+ if pxd:
+ level = 'module_pxd'
+ else:
+ level = 'module'
+
+ body = p_statement_list(s, ctx(level=level), first_statement = 1)
+ if s.sy != 'EOF':
+ s.error("Syntax error in statement [%s,%s]" % (
+ repr(s.sy), repr(s.systring)))
+ return ModuleNode(pos, doc = doc, body = body,
+ full_module_name = full_module_name,
+ directive_comments = directive_comments)
+
+def p_template_definition(s):
+ name = p_ident(s)
+ if s.sy == '=':
+ s.expect('=')
+ s.expect('*')
+ required = False
+ else:
+ required = True
+ return name, required
+
+def p_cpp_class_definition(s, pos, ctx):
+ # s.sy == 'cppclass'
+ s.next()
+ module_path = []
+ class_name = p_ident(s)
+ cname = p_opt_cname(s)
+ if cname is None and ctx.namespace is not None:
+ cname = ctx.namespace + "::" + class_name
+ if s.sy == '.':
+ error(pos, "Qualified class name not allowed C++ class")
+ if s.sy == '[':
+ s.next()
+ templates = [p_template_definition(s)]
+ while s.sy == ',':
+ s.next()
+ templates.append(p_template_definition(s))
+ s.expect(']')
+ template_names = [name for name, required in templates]
+ else:
+ templates = None
+ template_names = None
+ if s.sy == '(':
+ s.next()
+ base_classes = [p_c_base_type(s, templates = template_names)]
+ while s.sy == ',':
+ s.next()
+ base_classes.append(p_c_base_type(s, templates = template_names))
+ s.expect(')')
+ else:
+ base_classes = []
+ if s.sy == '[':
+ error(s.position(), "Name options not allowed for C++ class")
+ nogil = p_nogil(s)
+ if s.sy == ':':
+ s.next()
+ s.expect('NEWLINE')
+ s.expect_indent()
+ attributes = []
+ body_ctx = Ctx(visibility = ctx.visibility, level='cpp_class', nogil=nogil or ctx.nogil)
+ body_ctx.templates = template_names
+ while s.sy != 'DEDENT':
+ if s.sy != 'pass':
+ attributes.append(p_cpp_class_attribute(s, body_ctx))
+ else:
+ s.next()
+ s.expect_newline("Expected a newline")
+ s.expect_dedent()
+ else:
+ attributes = None
+ s.expect_newline("Syntax error in C++ class definition")
+ return Nodes.CppClassNode(pos,
+ name = class_name,
+ cname = cname,
+ base_classes = base_classes,
+ visibility = ctx.visibility,
+ in_pxd = ctx.level == 'module_pxd',
+ attributes = attributes,
+ templates = templates)
+
+def p_cpp_class_attribute(s, ctx):
+ decorators = None
+ if s.sy == '@':
+ decorators = p_decorators(s)
+ if s.systring == 'cppclass':
+ return p_cpp_class_definition(s, s.position(), ctx)
+ elif s.systring == 'ctypedef':
+ return p_ctypedef_statement(s, ctx)
+ elif s.sy == 'IDENT' and s.systring in struct_enum_union:
+ if s.systring != 'enum':
+ return p_cpp_class_definition(s, s.position(), ctx)
+ else:
+ return p_struct_enum(s, s.position(), ctx)
+ else:
+ node = p_c_func_or_var_declaration(s, s.position(), ctx)
+ if decorators is not None:
+ tup = Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode
+ if ctx.allow_struct_enum_decorator:
+ tup += Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode
+ if not isinstance(node, tup):
+ s.error("Decorators can only be followed by functions or classes")
+ node.decorators = decorators
+ return node
+
+
+#----------------------------------------------
+#
+# Debugging
+#
+#----------------------------------------------
+
+def print_parse_tree(f, node, level, key = None):
+ ind = " " * level
+ if node:
+ f.write(ind)
+ if key:
+ f.write("%s: " % key)
+ t = type(node)
+ if t is tuple:
+ f.write("(%s @ %s\n" % (node[0], node[1]))
+ for i in range(2, len(node)):
+ print_parse_tree(f, node[i], level+1)
+ f.write("%s)\n" % ind)
+ return
+ elif isinstance(node, Nodes.Node):
+ try:
+ tag = node.tag
+ except AttributeError:
+ tag = node.__class__.__name__
+ f.write("%s @ %s\n" % (tag, node.pos))
+ for name, value in node.__dict__.items():
+ if name != 'tag' and name != 'pos':
+ print_parse_tree(f, value, level+1, name)
+ return
+ elif t is list:
+ f.write("[\n")
+ for i in range(len(node)):
+ print_parse_tree(f, node[i], level+1)
+ f.write("%s]\n" % ind)
+ return
+ f.write("%s%s\n" % (ind, node))
diff --git a/contrib/tools/cython/Cython/Compiler/Pipeline.py b/contrib/tools/cython/Cython/Compiler/Pipeline.py
new file mode 100644
index 0000000000..5194c3e49b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Pipeline.py
@@ -0,0 +1,369 @@
+from __future__ import absolute_import
+
+import itertools
+from time import time
+
+from . import Errors
+from . import DebugFlags
+from . import Options
+from .Errors import CompileError, InternalError, AbortError
+from . import Naming
+
+#
+# Really small pipeline stages
+#
+def dumptree(t):
+ # For quick debugging in pipelines
+ print(t.dump())
+ return t
+
+def abort_on_errors(node):
+ # Stop the pipeline if there are any errors.
+ if Errors.num_errors != 0:
+ raise AbortError("pipeline break")
+ return node
+
+def parse_stage_factory(context):
+ def parse(compsrc):
+ source_desc = compsrc.source_desc
+ full_module_name = compsrc.full_module_name
+ initial_pos = (source_desc, 1, 0)
+ saved_cimport_from_pyx, Options.cimport_from_pyx = Options.cimport_from_pyx, False
+ scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0)
+ Options.cimport_from_pyx = saved_cimport_from_pyx
+ tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
+ tree.compilation_source = compsrc
+ tree.scope = scope
+ tree.is_pxd = False
+ return tree
+ return parse
+
+def parse_pxd_stage_factory(context, scope, module_name):
+ def parse(source_desc):
+ tree = context.parse(source_desc, scope, pxd=True,
+ full_module_name=module_name)
+ tree.scope = scope
+ tree.is_pxd = True
+ return tree
+ return parse
+
+def generate_pyx_code_stage_factory(options, result):
+ def generate_pyx_code_stage(module_node):
+ module_node.process_implementation(options, result)
+ result.compilation_source = module_node.compilation_source
+ return result
+ return generate_pyx_code_stage
+
+
+def inject_pxd_code_stage_factory(context):
+ def inject_pxd_code_stage(module_node):
+ for name, (statlistnode, scope) in context.pxds.items():
+ module_node.merge_in(statlistnode, scope)
+ return module_node
+ return inject_pxd_code_stage
+
+
+def use_utility_code_definitions(scope, target, seen=None):
+ if seen is None:
+ seen = set()
+
+ for entry in scope.entries.values():
+ if entry in seen:
+ continue
+
+ seen.add(entry)
+ if entry.used and entry.utility_code_definition:
+ target.use_utility_code(entry.utility_code_definition)
+ for required_utility in entry.utility_code_definition.requires:
+ target.use_utility_code(required_utility)
+ elif entry.as_module:
+ use_utility_code_definitions(entry.as_module, target, seen)
+
+
+def sort_utility_codes(utilcodes):
+ ranks = {}
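+    # Rank codes by dependency depth so that dependencies tend to sort before
+    # their dependents; the tiny original_order term keeps the sort stable.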
+ def get_rank(utilcode):
+ if utilcode not in ranks:
+ ranks[utilcode] = 0 # prevent infinite recursion on circular dependencies
+ original_order = len(ranks)
+ ranks[utilcode] = 1 + min([get_rank(dep) for dep in utilcode.requires or ()] or [-1]) + original_order * 1e-8
+ return ranks[utilcode]
+ for utilcode in utilcodes:
+ get_rank(utilcode)
+ return [utilcode for utilcode, _ in sorted(ranks.items(), key=lambda kv: kv[1])]
+
+
+def normalize_deps(utilcodes):
+ deps = {}
+ for utilcode in utilcodes:
+ deps[utilcode] = utilcode
+
+ def unify_dep(dep):
+ if dep in deps:
+ return deps[dep]
+ else:
+ deps[dep] = dep
+ return dep
+
+ for utilcode in utilcodes:
+ utilcode.requires = [unify_dep(dep) for dep in utilcode.requires or ()]
+
+
+def inject_utility_code_stage_factory(context):
+ def inject_utility_code_stage(module_node):
+ module_node.prepare_utility_code()
+ use_utility_code_definitions(context.cython_scope, module_node.scope)
+ module_node.scope.utility_code_list = sort_utility_codes(module_node.scope.utility_code_list)
+ normalize_deps(module_node.scope.utility_code_list)
+ added = []
+ # Note: the list might be extended inside the loop (if some utility code
+ # pulls in other utility code, explicitly or implicitly)
+ for utilcode in module_node.scope.utility_code_list:
+ if utilcode in added:
+ continue
+ added.append(utilcode)
+ if utilcode.requires:
+ for dep in utilcode.requires:
+ if dep not in added and dep not in module_node.scope.utility_code_list:
+ module_node.scope.utility_code_list.append(dep)
+ tree = utilcode.get_tree(cython_scope=context.cython_scope)
+ if tree:
+ module_node.merge_in(tree.body, tree.scope, merge_scope=True)
+ return module_node
+ return inject_utility_code_stage
+
+
+#
+# Pipeline factories
+#
+
+def create_pipeline(context, mode, exclude_classes=()):
+ assert mode in ('pyx', 'py', 'pxd')
+ from .Visitor import PrintTree
+ from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
+ from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform
+ from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
+ from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
+ from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
+ from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
+ from .ParseTreeTransforms import CalculateQualifiedNamesTransform
+ from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
+ from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
+ from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck
+ from .FlowControl import ControlFlowAnalysis
+ from .AnalysedTreeTransforms import AutoTestDictTransform
+ from .AutoDocTransforms import EmbedSignature
+ from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
+ from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
+ from .Optimize import InlineDefNodeCalls
+ from .Optimize import ConstantFolding, FinalOptimizePhase
+ from .Optimize import DropRefcountingTransform
+ from .Optimize import ConsolidateOverflowCheck
+ from .Buffer import IntroduceBufferAuxiliaryVars
+ from .ModuleNode import check_c_declarations, check_c_declarations_pxd
+
+
+ if mode == 'pxd':
+ _check_c_declarations = check_c_declarations_pxd
+ _specific_post_parse = PxdPostParse(context)
+ else:
+ _check_c_declarations = check_c_declarations
+ _specific_post_parse = None
+
+ if mode == 'py':
+ _align_function_definitions = AlignFunctionDefinitions(context)
+ else:
+ _align_function_definitions = None
+
+    # NOTE: These are the "common" parts of the pipeline, which are also
+    # run on code in pxd files. So they will be run multiple times in a
+    # compilation stage.
+ stages = [
+ NormalizeTree(context),
+ PostParse(context),
+ _specific_post_parse,
+ TrackNumpyAttributes(),
+ InterpretCompilerDirectives(context, context.compiler_directives),
+ ParallelRangeTransform(context),
+ AdjustDefByDirectives(context),
+ WithTransform(context),
+ MarkClosureVisitor(context),
+ _align_function_definitions,
+ RemoveUnreachableCode(context),
+ ConstantFolding(),
+ FlattenInListTransform(),
+ DecoratorTransform(context),
+ ForwardDeclareTypes(context),
+ InjectGilHandling(),
+ AnalyseDeclarationsTransform(context),
+ AutoTestDictTransform(context),
+ EmbedSignature(context),
+ EarlyReplaceBuiltinCalls(context), ## Necessary?
+ TransformBuiltinMethods(context),
+ MarkParallelAssignments(context),
+ ControlFlowAnalysis(context),
+ RemoveUnreachableCode(context),
+ # MarkParallelAssignments(context),
+ MarkOverflowingArithmetic(context),
+ IntroduceBufferAuxiliaryVars(context),
+ _check_c_declarations,
+ InlineDefNodeCalls(context),
+ AnalyseExpressionsTransform(context),
+ FindInvalidUseOfFusedTypes(context),
+ ExpandInplaceOperators(context),
+ IterationTransform(context),
+ SwitchTransform(context),
+ OptimizeBuiltinCalls(context), ## Necessary?
+ CreateClosureClasses(context), ## After all lookups and type inference
+ CalculateQualifiedNamesTransform(context),
+ ConsolidateOverflowCheck(context),
+ DropRefcountingTransform(),
+ FinalOptimizePhase(context),
+ GilCheck(),
+ ]
+ filtered_stages = []
+ for s in stages:
+ if s.__class__ not in exclude_classes:
+ filtered_stages.append(s)
+ return filtered_stages
+
+def create_pyx_pipeline(context, options, result, py=False, exclude_classes=()):
+ if py:
+ mode = 'py'
+ else:
+ mode = 'pyx'
+ test_support = []
+ if options.evaluate_tree_assertions:
+ from ..TestUtils import TreeAssertVisitor
+ test_support.append(TreeAssertVisitor())
+
+ if options.gdb_debug:
+ from ..Debugger import DebugWriter # requires Py2.5+
+ from .ParseTreeTransforms import DebugTransform
+ context.gdb_debug_outputwriter = DebugWriter.CythonDebugWriter(
+ options.output_dir)
+ debug_transform = [DebugTransform(context, options, result)]
+ else:
+ debug_transform = []
+
+ return list(itertools.chain(
+ [parse_stage_factory(context)],
+ create_pipeline(context, mode, exclude_classes=exclude_classes),
+ test_support,
+ [inject_pxd_code_stage_factory(context),
+ inject_utility_code_stage_factory(context),
+ abort_on_errors],
+ debug_transform,
+ [generate_pyx_code_stage_factory(options, result)]))
+
+def create_pxd_pipeline(context, scope, module_name):
+ from .CodeGeneration import ExtractPxdCode
+
+ # The pxd pipeline ends up with a CCodeWriter containing the
+ # code of the pxd, as well as a pxd scope.
+ return [
+ parse_pxd_stage_factory(context, scope, module_name)
+ ] + create_pipeline(context, 'pxd') + [
+ ExtractPxdCode()
+ ]
+
+def create_py_pipeline(context, options, result):
+ return create_pyx_pipeline(context, options, result, py=True)
+
+def create_pyx_as_pxd_pipeline(context, result):
+ from .ParseTreeTransforms import AlignFunctionDefinitions, \
+ MarkClosureVisitor, WithTransform, AnalyseDeclarationsTransform
+ from .Optimize import ConstantFolding, FlattenInListTransform
+ from .Nodes import StatListNode
+ pipeline = []
+ pyx_pipeline = create_pyx_pipeline(context, context.options, result,
+ exclude_classes=[
+ AlignFunctionDefinitions,
+ MarkClosureVisitor,
+ ConstantFolding,
+ FlattenInListTransform,
+ WithTransform
+ ])
+ for stage in pyx_pipeline:
+ pipeline.append(stage)
+ if isinstance(stage, AnalyseDeclarationsTransform):
+ # This is the last stage we need.
+ break
+ def fake_pxd(root):
+ for entry in root.scope.entries.values():
+ if not entry.in_cinclude:
+ entry.defined_in_pxd = 1
+ if entry.name == entry.cname and entry.visibility != 'extern':
+ # Always mangle non-extern cimported entries.
+ entry.cname = entry.scope.mangle(Naming.func_prefix, entry.name)
+ return StatListNode(root.pos, stats=[]), root.scope
+ pipeline.append(fake_pxd)
+ return pipeline
+
+def insert_into_pipeline(pipeline, transform, before=None, after=None):
+ """
+ Insert a new transform into the pipeline after or before an instance of
+ the given class. e.g.
+
+ pipeline = insert_into_pipeline(pipeline, transform,
+ after=AnalyseDeclarationsTransform)
+ """
+ assert before or after
+
+ cls = before or after
+ for i, t in enumerate(pipeline):
+ if isinstance(t, cls):
+ break
+
+ if after:
+ i += 1
+
+ return pipeline[:i] + [transform] + pipeline[i:]
+
+#
+# Running a pipeline
+#
+
+_pipeline_entry_points = {}
+
+
+def run_pipeline(pipeline, source, printtree=True):
+ from .Visitor import PrintTree
+ exec_ns = globals().copy() if DebugFlags.debug_verbose_pipeline else None
+
+ def run(phase, data):
+ return phase(data)
+
+ error = None
+ data = source
+ try:
+ try:
+ for phase in pipeline:
+ if phase is not None:
+ if not printtree and isinstance(phase, PrintTree):
+ continue
+ if DebugFlags.debug_verbose_pipeline:
+ t = time()
+ print("Entering pipeline phase %r" % phase)
+ # create a new wrapper for each step to show the name in profiles
+ phase_name = getattr(phase, '__name__', type(phase).__name__)
+ try:
+ run = _pipeline_entry_points[phase_name]
+ except KeyError:
+ exec("def %s(phase, data): return phase(data)" % phase_name, exec_ns)
+ run = _pipeline_entry_points[phase_name] = exec_ns[phase_name]
+ data = run(phase, data)
+ if DebugFlags.debug_verbose_pipeline:
+ print(" %.3f seconds" % (time() - t))
+ except CompileError as err:
+ # err is set
+ Errors.report_error(err, use_stack=False)
+ error = err
+ except InternalError as err:
+ # Only raise if there was not an earlier error
+ if Errors.num_errors == 0:
+ raise
+ error = err
+ except AbortError as err:
+ error = err
+ return (error, data)
diff --git a/contrib/tools/cython/Cython/Compiler/PyrexTypes.py b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py
new file mode 100644
index 0000000000..b2c92989ff
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py
@@ -0,0 +1,4745 @@
+#
+# Cython/Python language types
+#
+
+from __future__ import absolute_import
+
+import copy
+import hashlib
+import re
+
+try:
+ reduce
+except NameError:
+ from functools import reduce
+
+from Cython.Utils import cached_function
+from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode
+from . import StringEncoding
+from . import Naming
+
+from .Errors import error, warning
+
+
+class BaseType(object):
+ #
+ # Base class for all Cython types including pseudo-types.
+
+ # List of attribute names of any subtypes
+ subtypes = []
+ _empty_declaration = None
+ _specialization_name = None
+ default_format_spec = None
+
+ def can_coerce_to_pyobject(self, env):
+ return False
+
+ def can_coerce_from_pyobject(self, env):
+ return False
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ return False
+
+ def convert_to_pystring(self, cvalue, code, format_spec=None):
+ raise NotImplementedError("C types that support string formatting must override this method")
+
+ def cast_code(self, expr_code):
+ return "((%s)%s)" % (self.empty_declaration_code(), expr_code)
+
+ def empty_declaration_code(self):
+ if self._empty_declaration is None:
+ self._empty_declaration = self.declaration_code('')
+ return self._empty_declaration
+
+ def specialization_name(self):
+ if self._specialization_name is None:
+ # This is not entirely robust.
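+            # e.g. "unsigned long long" becomes "unsigned_long_long".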
+ common_subs = (self.empty_declaration_code()
+ .replace("unsigned ", "unsigned_")
+ .replace("long long", "long_long")
+ .replace(" ", "__"))
+ self._specialization_name = re.sub(
+ '[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs)
+ return self._specialization_name
+
+ def base_declaration_code(self, base_code, entity_code):
+ if entity_code:
+ return "%s %s" % (base_code, entity_code)
+ else:
+ return base_code
+
+ def __deepcopy__(self, memo):
+ """
+        Types never need to be copied; if we do copy, Unfortunate Things
+        Will Happen!
+ """
+ return self
+
+ def get_fused_types(self, result=None, seen=None, subtypes=None):
+ subtypes = subtypes or self.subtypes
+ if not subtypes:
+ return None
+
+ if result is None:
+ result = []
+ seen = set()
+
+ for attr in subtypes:
+ list_or_subtype = getattr(self, attr)
+ if list_or_subtype:
+ if isinstance(list_or_subtype, BaseType):
+ list_or_subtype.get_fused_types(result, seen)
+ else:
+ for subtype in list_or_subtype:
+ subtype.get_fused_types(result, seen)
+
+ return result
+
+ def specialize_fused(self, env):
+ if env.fused_to_specific:
+ return self.specialize(env.fused_to_specific)
+
+ return self
+
+ @property
+ def is_fused(self):
+ """
+ Whether this type or any of its subtypes is a fused type
+ """
+ # Add this indirection for the is_fused property to allow overriding
+ # get_fused_types in subclasses.
+ return self.get_fused_types()
+
+ def deduce_template_params(self, actual):
+ """
+ Deduce any template params in this (argument) type given the actual
+ argument type.
+
+ http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction
+ """
+ return {}
+
+ def __lt__(self, other):
+ """
+ For sorting. The sorting order should correspond to the preference of
+ conversion from Python types.
+
+        Override to provide something sensible. This is only implemented so
+        that Python 3 doesn't trip over comparisons of unorderable types.
+ """
+ return id(type(self)) < id(type(other))
+
+ def py_type_name(self):
+ """
+ Return the name of the Python type that can coerce to this type.
+ """
+
+ def typeof_name(self):
+ """
+ Return the string with which fused python functions can be indexed.
+ """
+ if self.is_builtin_type or self.py_type_name() == 'object':
+ index_name = self.py_type_name()
+ else:
+ index_name = str(self)
+
+ return index_name
+
+ def check_for_null_code(self, cname):
+ """
+ Return the code for a NULL-check in case an UnboundLocalError should
+ be raised if an entry of this type is referenced before assignment.
+ Returns None if no check should be performed.
+ """
+ return None
+
+ def invalid_value(self):
+ """
+ Returns the most invalid value an object of this type can assume as a
+ C expression string. Returns None if no such value exists.
+ """
+
+
+class PyrexType(BaseType):
+ #
+ # Base class for all Cython types
+ #
+ # is_pyobject boolean Is a Python object type
+ # is_extension_type boolean Is a Python extension type
+ # is_final_type boolean Is a final extension type
+ # is_numeric boolean Is a C numeric type
+ # is_int boolean Is a C integer type
+ # is_float boolean Is a C floating point type
+ # is_complex boolean Is a C complex type
+ # is_void boolean Is the C void type
+ # is_array boolean Is a C array type
+ # is_ptr boolean Is a C pointer type
+ # is_null_ptr boolean Is the type of NULL
+ # is_reference boolean Is a C reference type
+ # is_const boolean Is a C const type.
+ # is_cfunction boolean Is a C function type
+ # is_struct_or_union boolean Is a C struct or union type
+ # is_struct boolean Is a C struct type
+ # is_enum boolean Is a C enum type
+ # is_typedef boolean Is a typedef type
+ # is_string boolean Is a C char * type
+ # is_pyunicode_ptr boolean Is a C PyUNICODE * type
+ # is_cpp_string boolean Is a C++ std::string type
+ # is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE
+ # is_returncode boolean Is used only to signal exceptions
+ # is_error boolean Is the dummy error type
+ # is_buffer boolean Is buffer access type
+ # is_pythran_expr boolean Is Pythran expr
+ # is_numpy_buffer boolean Is Numpy array buffer
+ # has_attributes boolean Has C dot-selectable attributes
+ # default_value string Initial value that can be assigned before first user assignment.
+ # declaration_value string The value statically assigned on declaration (if any).
+ # entry Entry The Entry for this type
+ #
+ # declaration_code(entity_code,
+ # for_display = 0, dll_linkage = None, pyrex = 0)
+ # Returns a code fragment for the declaration of an entity
+ # of this type, given a code fragment for the entity.
+ # * If for_display, this is for reading by a human in an error
+ # message; otherwise it must be valid C code.
+ # * If dll_linkage is not None, it must be 'DL_EXPORT' or
+ # 'DL_IMPORT', and will be added to the base type part of
+ # the declaration.
+ # * If pyrex = 1, this is for use in a 'cdef extern'
+ # statement of a Cython include file.
+ #
+ # assignable_from(src_type)
+ # Tests whether a variable of this type can be
+ # assigned a value of type src_type.
+ #
+ # same_as(other_type)
+ # Tests whether this type represents the same type
+ # as other_type.
+ #
+ # as_argument_type():
+ # Coerces array and C function types into pointer type for use as
+ # a formal argument type.
+ #
+
+ is_pyobject = 0
+ is_unspecified = 0
+ is_extension_type = 0
+ is_final_type = 0
+ is_builtin_type = 0
+ is_numeric = 0
+ is_int = 0
+ is_float = 0
+ is_complex = 0
+ is_void = 0
+ is_array = 0
+ is_ptr = 0
+ is_null_ptr = 0
+ is_reference = 0
+ is_const = 0
+ is_cfunction = 0
+ is_struct_or_union = 0
+ is_cpp_class = 0
+ is_cpp_string = 0
+ is_struct = 0
+ is_enum = 0
+ is_typedef = 0
+ is_string = 0
+ is_pyunicode_ptr = 0
+ is_unicode_char = 0
+ is_returncode = 0
+ is_error = 0
+ is_buffer = 0
+ is_ctuple = 0
+ is_memoryviewslice = 0
+ is_pythran_expr = 0
+ is_numpy_buffer = 0
+ has_attributes = 0
+ default_value = ""
+ declaration_value = ""
+
+ def resolve(self):
+ # If a typedef, returns the base type.
+ return self
+
+ def specialize(self, values):
+ # TODO(danilo): Override wherever it makes sense.
+ return self
+
+ def literal_code(self, value):
+ # Returns a C code fragment representing a literal
+ # value of this type.
+ return str(value)
+
+ def __str__(self):
+ return self.declaration_code("", for_display = 1).strip()
+
+ def same_as(self, other_type, **kwds):
+ return self.same_as_resolved_type(other_type.resolve(), **kwds)
+
+ def same_as_resolved_type(self, other_type):
+ return self == other_type or other_type is error_type
+
+ def subtype_of(self, other_type):
+ return self.subtype_of_resolved_type(other_type.resolve())
+
+ def subtype_of_resolved_type(self, other_type):
+ return self.same_as(other_type)
+
+ def assignable_from(self, src_type):
+ return self.assignable_from_resolved_type(src_type.resolve())
+
+ def assignable_from_resolved_type(self, src_type):
+ return self.same_as(src_type)
+
+ def as_argument_type(self):
+ return self
+
+ def is_complete(self):
+ # A type is incomplete if it is an unsized array,
+ # a struct whose attributes are not defined, etc.
+ return 1
+
+ def is_simple_buffer_dtype(self):
+ return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or
+ self.is_extension_type or self.is_ptr)
+
+ def struct_nesting_depth(self):
+ # Returns the number levels of nested structs. This is
+ # used for constructing a stack for walking the run-time
+ # type information of the struct.
+ return 1
+
+ def global_init_code(self, entry, code):
+ # abstract
+ pass
+
+ def needs_nonecheck(self):
+ return 0
+
+ def _assign_from_py_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None, extra_args=None):
+ args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
+ convert_call = "%s(%s%s)" % (
+ from_py_function or self.from_py_function,
+ source_code,
+ args,
+ )
+ if self.is_enum:
+ convert_call = typecast(self, c_long_type, convert_call)
+ return '%s = %s; %s' % (
+ result_code,
+ convert_call,
+ code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+
+
+def public_decl(base_code, dll_linkage):
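+    # e.g. public_decl("int", "DL_EXPORT") -> "DL_EXPORT(int)"; commas are
+    # escaped so the declaration survives C macro argument splitting.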
+ if dll_linkage:
+ return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA '))
+ else:
+ return base_code
+
+def create_typedef_type(name, base_type, cname, is_external=0, namespace=None):
+ is_fused = base_type.is_fused
+ if base_type.is_complex or is_fused:
+ if is_external:
+ if is_fused:
+ msg = "Fused"
+ else:
+ msg = "Complex"
+
+ raise ValueError("%s external typedefs not supported" % msg)
+
+ return base_type
+ else:
+ return CTypedefType(name, base_type, cname, is_external, namespace)
+
+
+class CTypedefType(BaseType):
+ #
+ # Pseudo-type defined with a ctypedef statement in a
+ # 'cdef extern from' block.
+ # Delegates most attribute lookups to the base type.
+ # (Anything not defined here or in the BaseType is delegated.)
+ #
+ # qualified_name string
+ # typedef_name string
+ # typedef_cname string
+ # typedef_base_type PyrexType
+ # typedef_is_external bool
+
+ is_typedef = 1
+ typedef_is_external = 0
+
+ to_py_utility_code = None
+ from_py_utility_code = None
+
+ subtypes = ['typedef_base_type']
+
+ def __init__(self, name, base_type, cname, is_external=0, namespace=None):
+ assert not base_type.is_complex
+ self.typedef_name = name
+ self.typedef_cname = cname
+ self.typedef_base_type = base_type
+ self.typedef_is_external = is_external
+ self.typedef_namespace = namespace
+
+ def invalid_value(self):
+ return self.typedef_base_type.invalid_value()
+
+ def resolve(self):
+ return self.typedef_base_type.resolve()
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = self.typedef_name
+ else:
+ base_code = public_decl(self.typedef_cname, dll_linkage)
+ if self.typedef_namespace is not None and not pyrex:
+ base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def as_argument_type(self):
+ return self
+
+ def cast_code(self, expr_code):
+ # If self is really an array (rather than pointer), we can't cast.
+ # For example, the gmp mpz_t.
+ if self.typedef_base_type.is_array:
+ base_type = self.typedef_base_type.base_type
+ return CPtrType(base_type).cast_code(expr_code)
+ else:
+ return BaseType.cast_code(self, expr_code)
+
+ def specialize(self, values):
+ base_type = self.typedef_base_type.specialize(values)
+ namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None
+ if base_type is self.typedef_base_type and namespace is self.typedef_namespace:
+ return self
+ else:
+ return create_typedef_type(self.typedef_name, base_type, self.typedef_cname,
+ 0, namespace)
+
+ def __repr__(self):
+ return "<CTypedefType %s>" % self.typedef_cname
+
+ def __str__(self):
+ return self.typedef_name
+
+ def _create_utility_code(self, template_utility_code,
+ template_function_name):
+ type_name = type_identifier(self.typedef_cname)
+ utility_code = template_utility_code.specialize(
+ type = self.typedef_cname,
+ TypeName = type_name)
+ function_name = template_function_name % type_name
+ return utility_code, function_name
+
+ def create_to_py_utility_code(self, env):
+ if self.typedef_is_external:
+ if not self.to_py_utility_code:
+ base_type = self.typedef_base_type
+ if type(base_type) is CIntType:
+ self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntToPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": self.to_py_function}))
+ return True
+ elif base_type.is_float:
+ pass # XXX implement!
+ elif base_type.is_complex:
+ pass # XXX implement!
+ elif base_type.is_cpp_string:
+ cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self)
+ context = {
+ 'cname': cname,
+ 'type': self.typedef_cname,
+ }
+ from .UtilityCode import CythonUtilityCode
+ env.use_utility_code(CythonUtilityCode.load(
+ "string.to_py", "CppConvert.pyx", context=context))
+ self.to_py_function = cname
+ return True
+ if self.to_py_utility_code:
+ env.use_utility_code(self.to_py_utility_code)
+ return True
+ # delegation
+ return self.typedef_base_type.create_to_py_utility_code(env)
+
+ def create_from_py_utility_code(self, env):
+ if self.typedef_is_external:
+ if not self.from_py_utility_code:
+ base_type = self.typedef_base_type
+ if type(base_type) is CIntType:
+ self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntFromPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "FROM_PY_FUNCTION": self.from_py_function}))
+ return True
+ elif base_type.is_float:
+ pass # XXX implement!
+ elif base_type.is_complex:
+ pass # XXX implement!
+ elif base_type.is_cpp_string:
+ cname = '__pyx_convert_string_from_py_%s' % type_identifier(self)
+ context = {
+ 'cname': cname,
+ 'type': self.typedef_cname,
+ }
+ from .UtilityCode import CythonUtilityCode
+ env.use_utility_code(CythonUtilityCode.load(
+ "string.from_py", "CppConvert.pyx", context=context))
+ self.from_py_function = cname
+ return True
+ if self.from_py_utility_code:
+ env.use_utility_code(self.from_py_utility_code)
+ return True
+ # delegation
+ return self.typedef_base_type.create_from_py_utility_code(env)
+
+ def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
+ if to_py_function is None:
+ to_py_function = self.to_py_function
+ return self.typedef_base_type.to_py_call_code(
+ source_code, result_code, result_type, to_py_function)
+
+ def from_py_call_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None):
+ return self.typedef_base_type.from_py_call_code(
+ source_code, result_code, error_pos, code,
+ from_py_function or self.from_py_function,
+ error_condition or self.error_condition(result_code)
+ )
+
+ def overflow_check_binop(self, binop, env, const_rhs=False):
+ env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
+ type = self.empty_declaration_code()
+ name = self.specialization_name()
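+        # The returned helper is named e.g. "__Pyx_add_long_checking_overflow".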
+ if binop == "lshift":
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "LeftShift", "Overflow.c",
+ context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
+ else:
+ if const_rhs:
+ binop += "_const"
+ _load_overflow_base(env)
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "SizeCheck", "Overflow.c",
+ context={'TYPE': type, 'NAME': name}))
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "Binop", "Overflow.c",
+ context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
+ return "__Pyx_%s_%s_checking_overflow" % (binop, name)
+
+ def error_condition(self, result_code):
+ if self.typedef_is_external:
+ if self.exception_value:
+ condition = "(%s == %s)" % (
+ result_code, self.cast_code(self.exception_value))
+ if self.exception_check:
+ condition += " && PyErr_Occurred()"
+ return condition
+ # delegation
+ return self.typedef_base_type.error_condition(result_code)
+
+ def __getattr__(self, name):
+ return getattr(self.typedef_base_type, name)
+
+ def py_type_name(self):
+ return self.typedef_base_type.py_type_name()
+
+ def can_coerce_to_pyobject(self, env):
+ return self.typedef_base_type.can_coerce_to_pyobject(env)
+
+ def can_coerce_from_pyobject(self, env):
+ return self.typedef_base_type.can_coerce_from_pyobject(env)
+
+
+class MemoryViewSliceType(PyrexType):
+
+ is_memoryviewslice = 1
+
+ has_attributes = 1
+ scope = None
+
+    # These are special-cased in DefNode.
+ from_py_function = None
+ to_py_function = None
+
+ exception_value = None
+ exception_check = True
+
+ subtypes = ['dtype']
+
+ def __init__(self, base_dtype, axes):
+ """
+ MemoryViewSliceType(base, axes)
+
+ Base is the C base type; axes is a list of (access, packing) strings,
+ where access is one of 'full', 'direct' or 'ptr' and packing is one of
+ 'contig', 'strided' or 'follow'. There is one (access, packing) tuple
+ for each dimension.
+
+        The access specifiers determine whether the array data contains
+ pointers that need to be dereferenced along that axis when
+ retrieving/setting:
+
+ 'direct' -- No pointers stored in this dimension.
+ 'ptr' -- Pointer stored in this dimension.
+ 'full' -- Check along this dimension, don't assume either.
+
+        The packing specifiers specify how the array elements are laid out
+        in memory.
+
+ 'contig' -- The data is contiguous in memory along this dimension.
+ At most one dimension may be specified as 'contig'.
+ 'strided' -- The data isn't contiguous along this dimension.
+ 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
+ has its stride automatically computed from extents of the other
+ dimensions to ensure C or Fortran memory layout.
+
+ C-contiguous memory has 'direct' as the access spec, 'contig' as the
+ *last* axis' packing spec and 'follow' for all other packing specs.
+
+ Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
+ the *first* axis' packing spec and 'follow' for all other packing
+ specs.
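+
+        For example, a 2D C-contiguous slice (double[:, ::1] in Cython
+        notation) has axes [('direct', 'follow'), ('direct', 'contig')],
+        while the Fortran-contiguous double[::1, :] has
+        axes [('direct', 'contig'), ('direct', 'follow')].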
+ """
+ from . import Buffer, MemoryView
+
+ self.dtype = base_dtype
+ self.axes = axes
+ self.ndim = len(axes)
+ self.flags = MemoryView.get_buf_flags(self.axes)
+
+ self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
+ assert not (self.is_c_contig and self.is_f_contig)
+
+ self.mode = MemoryView.get_mode(axes)
+ self.writable_needed = False
+
+ if not self.dtype.is_fused:
+ self.dtype_name = Buffer.mangle_dtype_name(self.dtype)
+
+ def __hash__(self):
+ return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes))
+
+ def __eq__(self, other):
+ if isinstance(other, BaseType):
+ return self.same_as_resolved_type(other)
+ else:
+ return False
+
+ def same_as_resolved_type(self, other_type):
+ return ((other_type.is_memoryviewslice and
+ #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
+ self.dtype.same_as(other_type.dtype) and
+ self.axes == other_type.axes) or
+ other_type is error_type)
+
+ def needs_nonecheck(self):
+ return True
+
+ def is_complete(self):
+ # incomplete since the underlying struct doesn't have a cython.memoryview object.
+ return 0
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ # XXX: we put these guards in for now...
+ assert not pyrex
+ assert not dll_linkage
+ from . import MemoryView
+ base_code = str(self) if for_display else MemoryView.memviewslice_cname
+ return self.base_declaration_code(
+ base_code,
+ entity_code)
+
+ def attributes_known(self):
+ if self.scope is None:
+ from . import Symtab
+
+ self.scope = scope = Symtab.CClassScope(
+ 'mvs_class_'+self.specialization_suffix(),
+ None,
+ visibility='extern')
+
+ scope.parent_type = self
+ scope.directives = {}
+
+ scope.declare_var('_data', c_char_ptr_type, None,
+ cname='data', is_cdef=1)
+
+ return True
+
+ def declare_attribute(self, attribute, env, pos):
+ from . import MemoryView, Options
+
+ scope = self.scope
+
+ if attribute == 'shape':
+ scope.declare_var('shape',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ pos,
+ cname='shape',
+ is_cdef=1)
+
+ elif attribute == 'strides':
+ scope.declare_var('strides',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ pos,
+ cname='strides',
+ is_cdef=1)
+
+ elif attribute == 'suboffsets':
+ scope.declare_var('suboffsets',
+ c_array_type(c_py_ssize_t_type,
+ Options.buffer_max_dims),
+ pos,
+ cname='suboffsets',
+ is_cdef=1)
+
+ elif attribute in ("copy", "copy_fortran"):
+ ndim = len(self.axes)
+
+ follow_dim = [('direct', 'follow')]
+ contig_dim = [('direct', 'contig')]
+ to_axes_c = follow_dim * (ndim - 1) + contig_dim
+ to_axes_f = contig_dim + follow_dim * (ndim - 1)
+
+ dtype = self.dtype
+ if dtype.is_const:
+ dtype = dtype.const_base_type
+
+ to_memview_c = MemoryViewSliceType(dtype, to_axes_c)
+ to_memview_f = MemoryViewSliceType(dtype, to_axes_f)
+
+ for to_memview, cython_name in [(to_memview_c, "copy"),
+ (to_memview_f, "copy_fortran")]:
+ copy_func_type = CFuncType(
+ to_memview,
+ [CFuncTypeArg("memviewslice", self, None)])
+ copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview)
+
+ entry = scope.declare_cfunction(
+ cython_name,
+ copy_func_type, pos=pos, defining=1,
+ cname=copy_cname)
+
+ utility = MemoryView.get_copy_new_utility(pos, self, to_memview)
+ env.use_utility_code(utility)
+
+ MemoryView.use_cython_array_utility_code(env)
+
+ elif attribute in ("is_c_contig", "is_f_contig"):
+ # is_c_contig and is_f_contig functions
+ for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')):
+
+ is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
+
+ cfunctype = CFuncType(
+ return_type=c_bint_type,
+ args=[CFuncTypeArg("memviewslice", self, None)],
+ exception_value="-1",
+ )
+
+ entry = scope.declare_cfunction(cython_name,
+ cfunctype,
+ pos=pos,
+ defining=1,
+ cname=is_contig_name)
+
+ entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim)
+
+ return True
+
+ def get_entry(self, node, cname=None, type=None):
+ from . import MemoryView, Symtab
+
+ if cname is None:
+ assert node.is_simple() or node.is_temp or node.is_elemental
+ cname = node.result()
+
+ if type is None:
+ type = node.type
+
+ entry = Symtab.Entry(cname, cname, type, node.pos)
+ return MemoryView.MemoryViewSliceBufferEntry(entry)
+
+ def conforms_to(self, dst, broadcast=False, copying=False):
+ """
+ Returns True if src conforms to dst, False otherwise.
+
+ Conformance requires that the dtypes match, the ndims are equal, and
+ each axis spec pair is conformable.
+
+ Any packing/access spec is conformable to itself.
+
+ 'direct' and 'ptr' are conformable to 'full'.
+ 'contig' and 'follow' are conformable to 'strided'.
+ Any other combo is not conformable.
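+
+ For example, axes [('direct', 'contig')] conform to [('direct', 'strided')]:
+ a C-contiguous 'double[::1]' can be assigned to a strided 'double[:]',
+ but not vice versa (unless copying).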
+ """
+ from . import MemoryView
+
+ src = self
+
+ #if not copying and self.writable_needed and not dst.writable_needed:
+ # return False
+
+ src_dtype, dst_dtype = src.dtype, dst.dtype
+ if dst_dtype.is_const:
+ # Requesting read-only views is always ok => consider only the non-const base type.
+ dst_dtype = dst_dtype.const_base_type
+ if src_dtype.is_const:
+ # When assigning between read-only views, compare only the non-const base types.
+ src_dtype = src_dtype.const_base_type
+ elif copying and src_dtype.is_const:
+ # Copying by value => ignore const on source.
+ src_dtype = src_dtype.const_base_type
+
+ if src_dtype != dst_dtype:
+ return False
+
+ if src.ndim != dst.ndim:
+ if broadcast:
+ src, dst = MemoryView.broadcast_types(src, dst)
+ else:
+ return False
+
+ for src_spec, dst_spec in zip(src.axes, dst.axes):
+ src_access, src_packing = src_spec
+ dst_access, dst_packing = dst_spec
+ if src_access != dst_access and dst_access != 'full':
+ return False
+ if src_packing != dst_packing and dst_packing != 'strided' and not copying:
+ return False
+
+ return True
+
+ def valid_dtype(self, dtype, i=0):
+ """
+ Return whether type dtype can be used as the base type of a
+ memoryview slice.
+
+ We support structs, numeric types and Python objects.
+ """
+ if dtype.is_complex and dtype.real_type.is_int:
+ return False
+
+ if dtype.is_struct and dtype.kind == 'struct':
+ for member in dtype.scope.var_entries:
+ if not self.valid_dtype(member.type):
+ return False
+
+ return True
+
+ return (
+ dtype.is_error or
+ # Pointers are not valid (yet)
+ # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
+ (dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or
+ dtype.is_numeric or
+ dtype.is_pyobject or
+ dtype.is_fused or # accept this as it will be replaced by specializations later
+ (dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type))
+ )
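+
+ # Illustrative examples of the rules above (added for clarity, not part
+ # of the original source): valid_dtype(c_double_type) and
+ # valid_dtype(py_object_type) are True, while a complex type with an
+ # integer real_type, or a struct with an invalid member, is rejected.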
+
+ def validate_memslice_dtype(self, pos):
+ if not self.valid_dtype(self.dtype):
+ error(pos, "Invalid base type for memoryview slice: %s" % self.dtype)
+
+ def assert_direct_dims(self, pos):
+ for access, packing in self.axes:
+ if access != 'direct':
+ error(pos, "All dimensions must be direct")
+ return False
+ return True
+
+ def transpose(self, pos):
+ if not self.assert_direct_dims(pos):
+ return error_type
+ return MemoryViewSliceType(self.dtype, self.axes[::-1])
+
+ def specialization_name(self):
+ return '%s_%s' % (
+ super(MemoryViewSliceType, self).specialization_name(),
+ self.specialization_suffix())
+
+ def specialization_suffix(self):
+ return "%s_%s" % (self.axes_to_name(), self.dtype_name)
+
+ def can_coerce_to_pyobject(self, env):
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def check_for_null_code(self, cname):
+ return cname + '.memview'
+
+ def create_from_py_utility_code(self, env):
+ from . import MemoryView, Buffer
+
+ # We don't have 'code', so use a LazyUtilityCode with a callback.
+ def lazy_utility_callback(code):
+ context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype)
+ return TempitaUtilityCode.load(
+ "ObjectToMemviewSlice", "MemoryView_C.c", context=context)
+
+ env.use_utility_code(MemoryView.memviewslice_init_code)
+ env.use_utility_code(LazyUtilityCode(lazy_utility_callback))
+
+ if self.is_c_contig:
+ c_or_f_flag = "__Pyx_IS_C_CONTIG"
+ elif self.is_f_contig:
+ c_or_f_flag = "__Pyx_IS_F_CONTIG"
+ else:
+ c_or_f_flag = "0"
+
+ suffix = self.specialization_suffix()
+ funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix
+
+ context = dict(
+ MemoryView.context,
+ buf_flag = self.flags,
+ ndim = self.ndim,
+ axes_specs = ', '.join(self.axes_to_code()),
+ dtype_typedecl = self.dtype.empty_declaration_code(),
+ struct_nesting_depth = self.dtype.struct_nesting_depth(),
+ c_or_f_flag = c_or_f_flag,
+ funcname = funcname,
+ )
+
+ self.from_py_function = funcname
+ return True
+
+ def from_py_call_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None):
+ # NOTE: auto-detection of readonly buffers is disabled:
+ # writable = self.writable_needed or not self.dtype.is_const
+ writable = not self.dtype.is_const
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition,
+ extra_args=['PyBUF_WRITABLE' if writable else '0'])
+
+ def create_to_py_utility_code(self, env):
+ self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
+ return True
+
+ def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
+ assert self._dtype_to_py_func
+ assert self._dtype_from_py_func
+
+ to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func
+ from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func
+
+ tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject)
+ return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup
+
+ def dtype_object_conversion_funcs(self, env):
+ get_function = "__pyx_memview_get_%s" % self.dtype_name
+ set_function = "__pyx_memview_set_%s" % self.dtype_name
+
+ context = dict(
+ get_function = get_function,
+ set_function = set_function,
+ )
+
+ if self.dtype.is_pyobject:
+ utility_name = "MemviewObjectToObject"
+ else:
+ self.dtype.create_to_py_utility_code(env)
+ to_py_function = self.dtype.to_py_function
+
+ from_py_function = None
+ if not self.dtype.is_const:
+ self.dtype.create_from_py_utility_code(env)
+ from_py_function = self.dtype.from_py_function
+
+ if not (to_py_function or from_py_function):
+ return "NULL", "NULL"
+ if not to_py_function:
+ get_function = "NULL"
+ if not from_py_function:
+ set_function = "NULL"
+
+ utility_name = "MemviewDtypeToObject"
+ error_condition = (self.dtype.error_condition('value') or
+ 'PyErr_Occurred()')
+ context.update(
+ to_py_function=to_py_function,
+ from_py_function=from_py_function,
+ dtype=self.dtype.empty_declaration_code(),
+ error_condition=error_condition,
+ )
+
+ utility = TempitaUtilityCode.load_cached(
+ utility_name, "MemoryView_C.c", context=context)
+ env.use_utility_code(utility)
+ return get_function, set_function
+
+ def axes_to_code(self):
+ """Return a list of code constants for each axis"""
+ from . import MemoryView
+ d = MemoryView._spec_to_const
+ return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]
+
+ def axes_to_name(self):
+ """Return an abbreviated name for our axes"""
+ from . import MemoryView
+ d = MemoryView._spec_to_abbrev
+ return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])
+
+ def error_condition(self, result_code):
+ return "!%s.memview" % result_code
+
+ def __str__(self):
+ from . import MemoryView
+
+ axes_code_list = []
+ for idx, (access, packing) in enumerate(self.axes):
+ flag = MemoryView.get_memoryview_flag(access, packing)
+ if flag == "strided":
+ axes_code_list.append(":")
+ else:
+ if flag == 'contiguous':
+ have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
+ if p == 'follow']
+ if have_follow or self.ndim == 1:
+ flag = '1'
+
+ axes_code_list.append("::" + flag)
+
+ if self.dtype.is_pyobject:
+ dtype_name = self.dtype.name
+ else:
+ dtype_name = self.dtype
+
+ return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))
+
+ def specialize(self, values):
+ """This does not validate the base type!!"""
+ dtype = self.dtype.specialize(values)
+ if dtype is not self.dtype:
+ return MemoryViewSliceType(dtype, self.axes)
+
+ return self
+
+ def cast_code(self, expr_code):
+ return expr_code
+
+
+class BufferType(BaseType):
+ #
+ # Delegates most attribute lookups to the base type.
+ # (Anything not defined here or in the BaseType is delegated.)
+ #
+ # dtype PyrexType
+ # ndim int
+ # mode str
+ # negative_indices bool
+ # cast bool
+ # is_buffer bool
+ # writable bool
+
+ is_buffer = 1
+ writable = True
+
+ subtypes = ['dtype']
+
+ def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
+ self.base = base
+ self.dtype = dtype
+ self.ndim = ndim
+ self.buffer_ptr_type = CPtrType(dtype)
+ self.mode = mode
+ self.negative_indices = negative_indices
+ self.cast = cast
+ self.is_numpy_buffer = self.base.name == "ndarray"
+
+ def can_coerce_to_pyobject(self, env):
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def as_argument_type(self):
+ return self
+
+ def specialize(self, values):
+ dtype = self.dtype.specialize(values)
+ if dtype is not self.dtype:
+ return BufferType(self.base, dtype, self.ndim, self.mode,
+ self.negative_indices, self.cast)
+ return self
+
+ def get_entry(self, node):
+ from . import Buffer
+ assert node.is_name
+ return Buffer.BufferEntry(node.entry)
+
+ def __getattr__(self, name):
+ return getattr(self.base, name)
+
+ def __repr__(self):
+ return "<BufferType %r>" % self.base
+
+ def __str__(self):
+ # avoid ', ', as fused functions split the signature string on ', '
+ cast_str = ''
+ if self.cast:
+ cast_str = ',cast=True'
+
+ return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
+ cast_str)
+
+ def assignable_from(self, other_type):
+ if other_type.is_buffer:
+ return (self.same_as(other_type, compare_base=False) and
+ self.base.assignable_from(other_type.base))
+
+ return self.base.assignable_from(other_type)
+
+ def same_as(self, other_type, compare_base=True):
+ if not other_type.is_buffer:
+ return other_type.same_as(self.base)
+
+ return (self.dtype.same_as(other_type.dtype) and
+ self.ndim == other_type.ndim and
+ self.mode == other_type.mode and
+ self.cast == other_type.cast and
+ (not compare_base or self.base.same_as(other_type.base)))
+
+
+class PyObjectType(PyrexType):
+ #
+ # Base class for all Python object types (reference-counted).
+ #
+ # buffer_defaults dict or None Default options for buffer
+
+ name = "object"
+ is_pyobject = 1
+ default_value = "0"
+ declaration_value = "0"
+ buffer_defaults = None
+ is_extern = False
+ is_subclassed = False
+ is_gc_simple = False
+
+ def __str__(self):
+ return "Python object"
+
+ def __repr__(self):
+ return "<PyObjectType>"
+
+ def can_coerce_to_pyobject(self, env):
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def default_coerced_ctype(self):
+ """The default C type that this Python type coerces to, or None."""
+ return None
+
+ def assignable_from(self, src_type):
+ # except for pointers, conversion will be attempted
+ return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = "object"
+ else:
+ base_code = public_decl("PyObject", dll_linkage)
+ entity_code = "*%s" % entity_code
+ return self.base_declaration_code(base_code, entity_code)
+
+ def as_pyobject(self, cname):
+ if (not self.is_complete()) or self.is_extension_type:
+ return "(PyObject *)" + cname
+ else:
+ return cname
+
+ def py_type_name(self):
+ return "object"
+
+ def __lt__(self, other):
+ """
+ Make sure we sort highest, since an isinstance() check against
+ py_type_name ('object') always succeeds
+ """
+ return False
+
+ def global_init_code(self, entry, code):
+ code.put_init_var_to_py_none(entry, nanny=False)
+
+ def check_for_null_code(self, cname):
+ return cname
+
+
+builtin_types_that_cannot_create_refcycles = set([
+ 'bool', 'int', 'long', 'float', 'complex',
+ 'bytearray', 'bytes', 'unicode', 'str', 'basestring'
+])
+
+
+class BuiltinObjectType(PyObjectType):
+ # objstruct_cname string Name of PyObject struct
+
+ is_builtin_type = 1
+ has_attributes = 1
+ base_type = None
+ module_name = '__builtin__'
+ require_exact = 1
+
+ # fields that let it look like an extension type
+ vtabslot_cname = None
+ vtabstruct_cname = None
+ vtabptr_cname = None
+ typedef_flag = True
+ is_external = True
+ decl_type = 'PyObject'
+
+ def __init__(self, name, cname, objstruct_cname=None):
+ self.name = name
+ self.cname = cname
+ self.typeptr_cname = "(&%s)" % cname
+ self.objstruct_cname = objstruct_cname
+ self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
+ if name == 'type':
+ # Special case the type type, as many C API calls (and other
+ # libraries) actually expect a PyTypeObject* for type arguments.
+ self.decl_type = objstruct_cname
+ if name == 'Exception':
+ self.require_exact = 0
+
+ def set_scope(self, scope):
+ self.scope = scope
+ if scope:
+ scope.parent_type = self
+
+ def __str__(self):
+ return "%s object" % self.name
+
+ def __repr__(self):
+ return "<%s>"% self.cname
+
+ def default_coerced_ctype(self):
+ if self.name in ('bytes', 'bytearray'):
+ return c_char_ptr_type
+ elif self.name == 'bool':
+ return c_bint_type
+ elif self.name == 'float':
+ return c_double_type
+ return None
+
+ def assignable_from(self, src_type):
+ if isinstance(src_type, BuiltinObjectType):
+ if self.name == 'basestring':
+ return src_type.name in ('str', 'unicode', 'basestring')
+ else:
+ return src_type.name == self.name
+ elif src_type.is_extension_type:
+ # FIXME: This is an ugly special case that we currently
+ # keep supporting. It allows users to specify builtin
+ # types as external extension types, while keeping them
+ # compatible with the real builtin types. We already
+ # generate a warning for it. Big TODO: remove!
+ return (src_type.module_name == '__builtin__' and
+ src_type.name == self.name)
+ else:
+ return True
+
+ def typeobj_is_available(self):
+ return True
+
+ def attributes_known(self):
+ return True
+
+ def subtype_of(self, type):
+ return type.is_pyobject and type.assignable_from(self)
+
+ def type_check_function(self, exact=True):
+ type_name = self.name
+ if type_name == 'str':
+ type_check = 'PyString_Check'
+ elif type_name == 'basestring':
+ type_check = '__Pyx_PyBaseString_Check'
+ elif type_name == 'Exception':
+ type_check = '__Pyx_PyException_Check'
+ elif type_name == 'bytearray':
+ type_check = 'PyByteArray_Check'
+ elif type_name == 'frozenset':
+ type_check = 'PyFrozenSet_Check'
+ else:
+ type_check = 'Py%s_Check' % type_name.capitalize()
+ if exact and type_name not in ('bool', 'slice', 'Exception'):
+ type_check += 'Exact'
+ return type_check
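+ # For example (standard CPython API names): 'list' yields
+ # "PyList_CheckExact" when exact=True and "PyList_Check" otherwise;
+ # 'bool' never gets the 'Exact' suffix since CPython provides no
+ # PyBool_CheckExact.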
+
+ def isinstance_code(self, arg):
+ return '%s(%s)' % (self.type_check_function(exact=False), arg)
+
+ def type_test_code(self, arg, notnone=False, exact=True):
+ type_check = self.type_check_function(exact=exact)
+ check = 'likely(%s(%s))' % (type_check, arg)
+ if not notnone:
+ check += '||((%s) == Py_None)' % arg
+ if self.name == 'basestring':
+ name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
+ space_for_name = 16
+ else:
+ name = '"%s"' % self.name
+ # avoid wasting too much space but limit number of different format strings
+ space_for_name = (len(self.name) // 16 + 1) * 16
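+ # e.g. a 4-character name rounds up to 16, a 20-character name to 32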
+ error = '((void)PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % (
+ space_for_name, name, arg)
+ return check + '||' + error
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = self.name
+ else:
+ base_code = public_decl(self.decl_type, dll_linkage)
+ entity_code = "*%s" % entity_code
+ return self.base_declaration_code(base_code, entity_code)
+
+ def as_pyobject(self, cname):
+ if self.decl_type == 'PyObject':
+ return cname
+ else:
+ return "(PyObject *)" + cname
+
+ def cast_code(self, expr_code, to_object_struct = False):
+ return "((%s*)%s)" % (
+ to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None
+ expr_code)
+
+ def py_type_name(self):
+ return self.name
+
+
+class PyExtensionType(PyObjectType):
+ #
+ # A Python extension type.
+ #
+ # name string
+ # scope CClassScope Attribute namespace
+ # visibility string
+ # typedef_flag boolean
+ # base_type PyExtensionType or None
+ # module_name string or None Qualified name of defining module
+ # objstruct_cname string Name of PyObject struct
+ # objtypedef_cname string Name of PyObject struct typedef
+ # typeobj_cname string or None C code fragment referring to type object
+ # typeptr_cname string or None Name of pointer to external type object
+ # vtabslot_cname string Name of C method table member
+ # vtabstruct_cname string Name of C method table struct
+ # vtabptr_cname string Name of pointer to C method table
+ # vtable_cname string Name of C method table definition
+ # early_init boolean Whether to initialize early (as opposed to during module execution).
+ # defered_declarations [thunk] Used to declare class hierarchies in order
+ # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
+
+ is_extension_type = 1
+ has_attributes = 1
+ early_init = 1
+
+ objtypedef_cname = None
+
+ def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None):
+ self.name = name
+ self.scope = None
+ self.typedef_flag = typedef_flag
+ if base_type is not None:
+ base_type.is_subclassed = True
+ self.base_type = base_type
+ self.module_name = None
+ self.objstruct_cname = None
+ self.typeobj_cname = None
+ self.typeptr_cname = None
+ self.vtabslot_cname = None
+ self.vtabstruct_cname = None
+ self.vtabptr_cname = None
+ self.vtable_cname = None
+ self.is_external = is_external
+ self.check_size = check_size or 'warn'
+ self.defered_declarations = []
+
+ def set_scope(self, scope):
+ self.scope = scope
+ if scope:
+ scope.parent_type = self
+
+ def needs_nonecheck(self):
+ return True
+
+ def subtype_of_resolved_type(self, other_type):
+ if other_type.is_extension_type or other_type.is_builtin_type:
+ return self is other_type or (
+ self.base_type and self.base_type.subtype_of(other_type))
+ else:
+ return other_type is py_object_type
+
+ def typeobj_is_available(self):
+ # Do we have a pointer to the type object?
+ return self.typeptr_cname
+
+ def typeobj_is_imported(self):
+ # If we don't know the C name of the type object but we do
+ # know which module it's defined in, it will be imported.
+ return self.typeobj_cname is None and self.module_name is not None
+
+ def assignable_from(self, src_type):
+ if self == src_type:
+ return True
+ if isinstance(src_type, PyExtensionType):
+ if src_type.base_type is not None:
+ return self.assignable_from(src_type.base_type)
+ if isinstance(src_type, BuiltinObjectType):
+ # FIXME: This is an ugly special case that we currently
+ # keep supporting. It allows users to specify builtin
+ # types as external extension types, while keeping them
+ # compatible with the real builtin types. We already
+ # generate a warning for it. Big TODO: remove!
+ return (self.module_name == '__builtin__' and
+ self.name == src_type.name)
+ return False
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
+ if pyrex or for_display:
+ base_code = self.name
+ else:
+ if self.typedef_flag:
+ objstruct = self.objstruct_cname
+ else:
+ objstruct = "struct %s" % self.objstruct_cname
+ base_code = public_decl(objstruct, dll_linkage)
+ if deref:
+ assert not entity_code
+ else:
+ entity_code = "*%s" % entity_code
+ return self.base_declaration_code(base_code, entity_code)
+
+ def type_test_code(self, py_arg, notnone=False):
+
+ none_check = "((%s) == Py_None)" % py_arg
+ type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
+ py_arg, self.typeptr_cname)
+ if notnone:
+ return type_check
+ else:
+ return "likely(%s || %s)" % (none_check, type_check)
+
+ def attributes_known(self):
+ return self.scope is not None
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<PyExtensionType %s%s>" % (self.scope.class_name,
+ ("", " typedef")[self.typedef_flag])
+
+ def py_type_name(self):
+ if not self.module_name:
+ return self.name
+
+ return "__import__(%r, None, None, ['']).%s" % (self.module_name,
+ self.name)
+
+class CType(PyrexType):
+ #
+ # Base class for all C types (non-reference-counted).
+ #
+ # to_py_function string C function for converting to Python object
+ # from_py_function string C function for constructing from Python object
+ #
+
+ to_py_function = None
+ from_py_function = None
+ exception_value = None
+ exception_check = 1
+
+ def create_to_py_utility_code(self, env):
+ return self.to_py_function is not None
+
+ def create_from_py_utility_code(self, env):
+ return self.from_py_function is not None
+
+ def can_coerce_to_pyobject(self, env):
+ return self.create_to_py_utility_code(env)
+
+ def can_coerce_from_pyobject(self, env):
+ return self.create_from_py_utility_code(env)
+
+ def error_condition(self, result_code):
+ conds = []
+ if self.is_string or self.is_pyunicode_ptr:
+ conds.append("(!%s)" % result_code)
+ elif self.exception_value is not None:
+ conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value))
+ if self.exception_check:
+ conds.append("PyErr_Occurred()")
+ if len(conds) > 0:
+ return " && ".join(conds)
+ else:
+ return 0
+
+ def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
+ func = self.to_py_function if to_py_function is None else to_py_function
+ assert func
+ if self.is_string or self.is_cpp_string:
+ if result_type.is_builtin_type:
+ result_type_name = result_type.name
+ if result_type_name in ('bytes', 'str', 'unicode'):
+ func = func.replace("Object", result_type_name.title(), 1)
+ elif result_type_name == 'bytearray':
+ func = func.replace("Object", "ByteArray", 1)
+ return '%s = %s(%s)' % (
+ result_code,
+ func,
+ source_code or 'NULL')
+
+ def from_py_call_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None):
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition)
+
+
+class PythranExpr(CType):
+ # Pythran object of a given type
+
+ to_py_function = "__Pyx_pythran_to_python"
+ is_pythran_expr = True
+ writable = True
+ has_attributes = 1
+
+ def __init__(self, pythran_type, org_buffer=None):
+ self.org_buffer = org_buffer
+ self.pythran_type = pythran_type
+ self.name = self.pythran_type
+ self.cname = self.pythran_type
+ self.from_py_function = "from_python<%s>" % (self.pythran_type)
+ self.scope = None
+
+ def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
+ assert not pyrex
+ return "%s %s" % (self.cname, entity_code)
+
+ def attributes_known(self):
+ if self.scope is None:
+ from . import Symtab
+ # FIXME: fake C scope, might be better represented by a struct or C++ class scope
+ self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
+ scope.parent_type = self
+ scope.directives = {}
+ scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True)
+ scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True)
+
+ return True
+
+ def __eq__(self, other):
+ return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
+
+ def __ne__(self, other):
+ return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
+
+ def __hash__(self):
+ return hash(self.pythran_type)
+
+
+class CConstType(BaseType):
+
+ is_const = 1
+ subtypes = ['const_base_type']
+
+ def __init__(self, const_base_type):
+ self.const_base_type = const_base_type
+ if const_base_type.has_attributes and const_base_type.scope is not None:
+ from . import Symtab
+ self.scope = Symtab.CConstScope(const_base_type.scope)
+
+ def __repr__(self):
+ return "<CConstType %s>" % repr(self.const_base_type)
+
+ def __str__(self):
+ return self.declaration_code("", for_display=1)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if for_display or pyrex:
+ return "const " + self.const_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex)
+ else:
+ return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex)
+
+ def specialize(self, values):
+ base_type = self.const_base_type.specialize(values)
+ if base_type == self.const_base_type:
+ return self
+ else:
+ return CConstType(base_type)
+
+ def deduce_template_params(self, actual):
+ return self.const_base_type.deduce_template_params(actual)
+
+ def can_coerce_to_pyobject(self, env):
+ return self.const_base_type.can_coerce_to_pyobject(env)
+
+ def can_coerce_from_pyobject(self, env):
+ return self.const_base_type.can_coerce_from_pyobject(env)
+
+ def create_to_py_utility_code(self, env):
+ if self.const_base_type.create_to_py_utility_code(env):
+ self.to_py_function = self.const_base_type.to_py_function
+ return True
+
+ def same_as_resolved_type(self, other_type):
+ if other_type.is_const:
+ return self.const_base_type.same_as_resolved_type(other_type.const_base_type)
+ # Accept const LHS <- non-const RHS.
+ return self.const_base_type.same_as_resolved_type(other_type)
+
+ def __getattr__(self, name):
+ return getattr(self.const_base_type, name)
+
+
+class FusedType(CType):
+ """
+ Represents a Fused Type. All it needs to do is keep track of the types
+ it aggregates, as it will be replaced with its specific version wherever
+ needed.
+
+ See http://wiki.cython.org/enhancements/fusedtypes
+
+ types [PyrexType] is the list of types to be fused
+ name str the name of the ctypedef
+ """
+
+ is_fused = 1
+ exception_check = 0
+
+ def __init__(self, types, name=None):
+ # Use list rather than set to preserve order (list should be short).
+ flattened_types = []
+ for t in types:
+ if t.is_fused:
+ # recursively merge in subtypes
+ for subtype in t.types:
+ if subtype not in flattened_types:
+ flattened_types.append(subtype)
+ elif t not in flattened_types:
+ flattened_types.append(t)
+ self.types = flattened_types
+ self.name = name
+
+ def declaration_code(self, entity_code, for_display = 0,
+ dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ return self.name
+
+ raise Exception("This may never happen, please report a bug")
+
+ def __repr__(self):
+ return 'FusedType(name=%r)' % self.name
+
+ def specialize(self, values):
+ return values[self]
+
+ def get_fused_types(self, result=None, seen=None):
+ if result is None:
+ return [self]
+
+ if self not in seen:
+ result.append(self)
+ seen.add(self)
+
+
+class CVoidType(CType):
+ #
+ # C "void" type
+ #
+
+ is_void = 1
+ to_py_function = "__Pyx_void_to_None"
+
+ def __repr__(self):
+ return "<CVoidType>"
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = "void"
+ else:
+ base_code = public_decl("void", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def is_complete(self):
+ return 0
+
+class InvisibleVoidType(CVoidType):
+ #
+ # For use with C++ constructors and destructors return types.
+ # Acts like void, but does not print out a declaration.
+ #
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = "[void]"
+ else:
+ base_code = public_decl("", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+
+class CNumericType(CType):
+ #
+ # Base class for all C numeric types.
+ #
+ # rank integer Relative size
+ # signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
+ #
+
+ is_numeric = 1
+ default_value = "0"
+ has_attributes = True
+ scope = None
+
+ sign_words = ("unsigned ", "", "signed ")
+
+ def __init__(self, rank, signed = 1):
+ self.rank = rank
+ if rank > 0 and signed == SIGNED:
+ # Signed is meaningless for anything but char, and complicates
+ # type promotion.
+ signed = 1
+ self.signed = signed
+
+ def sign_and_name(self):
+ s = self.sign_words[self.signed]
+ n = rank_to_type_name[self.rank]
+ return s + n
+
+ def __repr__(self):
+ return "<CNumericType %s>" % self.sign_and_name()
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ type_name = self.sign_and_name()
+ if pyrex or for_display:
+ base_code = type_name.replace('PY_LONG_LONG', 'long long')
+ else:
+ base_code = public_decl(type_name, dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def attributes_known(self):
+ if self.scope is None:
+ from . import Symtab
+ self.scope = scope = Symtab.CClassScope(
+ '',
+ None,
+ visibility="extern")
+ scope.parent_type = self
+ scope.directives = {}
+ scope.declare_cfunction(
+ "conjugate",
+ CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
+ pos=None,
+ defining=1,
+ cname=" ")
+ return True
+
+ def __lt__(self, other):
+ """Sort based on rank, preferring signed over unsigned"""
+ if other.is_numeric:
+ return self.rank > other.rank and self.signed >= other.signed
+
+ # Prefer numeric types over others
+ return True
+
+ def py_type_name(self):
+ if self.rank <= 4:
+ return "(int, long)"
+ return "float"
+
+
+class ForbidUseClass:
+ def __repr__(self):
+ raise RuntimeError()
+ def __str__(self):
+ raise RuntimeError()
+ForbidUse = ForbidUseClass()
+
+
+class CIntLike(object):
+ """Mixin for shared behaviour of C integers and enums.
+ """
+ to_py_function = None
+ from_py_function = None
+ to_pyunicode_utility = None
+ default_format_spec = 'd'
+
+ def can_coerce_to_pyobject(self, env):
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if type(self).to_py_function is None:
+ self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntToPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": self.to_py_function}))
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if type(self).from_py_function is None:
+ self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntFromPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "FROM_PY_FUNCTION": self.from_py_function}))
+ return True
+
+ @staticmethod
+ def _parse_format(format_spec):
+ padding = ' '
+ if not format_spec:
+ return ('d', 0, padding)
+ format_type = format_spec[-1]
+ if format_type in ('o', 'd', 'x', 'X'):
+ prefix = format_spec[:-1]
+ elif format_type.isdigit():
+ format_type = 'd'
+ prefix = format_spec
+ else:
+ return (None, 0, padding)
+ if not prefix:
+ return (format_type, 0, padding)
+ if prefix[0] == '-':
+ prefix = prefix[1:]
+ if prefix and prefix[0] == '0':
+ padding = '0'
+ prefix = prefix.lstrip('0')
+ if prefix.isdigit():
+ return (format_type, int(prefix), padding)
+ return (None, 0, padding)
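+ # Illustrative results (added for clarity, not in the original source):
+ # '' -> ('d', 0, ' '); '05d' -> ('d', 5, '0'); '-3d' -> ('d', 3, ' ');
+ # 'x' -> ('x', 0, ' '); 'f' -> (None, 0, ' '), i.e. unsupported.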
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ format_type, width, padding = self._parse_format(format_spec)
+ return format_type is not None and width <= 2**30
+
+ def convert_to_pystring(self, cvalue, code, format_spec=None):
+ if self.to_pyunicode_utility is None:
+ utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name()
+ to_pyunicode_utility = TempitaUtilityCode.load_cached(
+ "CIntToPyUnicode", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": utility_code_name})
+ self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility)
+ else:
+ utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility
+ code.globalstate.use_utility_code(to_pyunicode_utility)
+ format_type, width, padding_char = self._parse_format(format_spec)
+ return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type)
+
+
+class CIntType(CIntLike, CNumericType):
+
+ is_int = 1
+ typedef_flag = 0
+ exception_value = -1
+
+ def get_to_py_type_conversion(self):
+ if self.rank < list(rank_to_type_name).index('int'):
+ # This assumes sizeof(short) < sizeof(int)
+ return "PyInt_FromLong"
+ else:
+ # Py{Int|Long}_From[Unsigned]Long[Long]
+ Prefix = "Int"
+ SignWord = ""
+ TypeName = "Long"
+ if not self.signed:
+ Prefix = "Long"
+ SignWord = "Unsigned"
+ if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
+ Prefix = "Long"
+ TypeName = "LongLong"
+ return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)
+
+ def assignable_from_resolved_type(self, src_type):
+ return src_type.is_int or src_type.is_enum or src_type is error_type
+
+ def invalid_value(self):
+ if rank_to_type_name[int(self.rank)] == 'char':
+ return "'?'"
+ else:
+ # We do not really know the size of the type, so return
+ # a 32-bit literal and rely on casting to final type. It will
+ # be negative for signed ints, which is good.
+ return "0xbad0bad0"
+
+ def overflow_check_binop(self, binop, env, const_rhs=False):
+ env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
+ type = self.empty_declaration_code()
+ name = self.specialization_name()
+ if binop == "lshift":
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "LeftShift", "Overflow.c",
+ context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
+ else:
+ if const_rhs:
+ binop += "_const"
+ if type in ('int', 'long', 'long long'):
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "BaseCaseSigned", "Overflow.c",
+ context={'INT': type, 'NAME': name}))
+ elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "BaseCaseUnsigned", "Overflow.c",
+ context={'UINT': type, 'NAME': name}))
+ elif self.rank <= 1:
+ # sizeof(short) < sizeof(int)
+ return "__Pyx_%s_%s_no_overflow" % (binop, name)
+ else:
+ _load_overflow_base(env)
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "SizeCheck", "Overflow.c",
+ context={'TYPE': type, 'NAME': name}))
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "Binop", "Overflow.c",
+ context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
+ return "__Pyx_%s_%s_checking_overflow" % (binop, name)
+
+
+def _load_overflow_base(env):
+ env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
+ for type in ('int', 'long', 'long long'):
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "BaseCaseSigned", "Overflow.c",
+ context={'INT': type, 'NAME': type.replace(' ', '_')}))
+ for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "BaseCaseUnsigned", "Overflow.c",
+ context={'UINT': type, 'NAME': type.replace(' ', '_')}))
+
+
+class CAnonEnumType(CIntType):
+
+ is_enum = 1
+
+ def sign_and_name(self):
+ return 'int'
+
+
+class CReturnCodeType(CIntType):
+
+ to_py_function = "__Pyx_Owned_Py_None"
+
+ is_returncode = True
+ exception_check = False
+ default_format_spec = ''
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ return not format_spec
+
+ def convert_to_pystring(self, cvalue, code, format_spec=None):
+ return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname
+
+
+class CBIntType(CIntType):
+
+ to_py_function = "__Pyx_PyBool_FromLong"
+ from_py_function = "__Pyx_PyObject_IsTrue"
+ exception_check = 1 # for C++ bool
+ default_format_spec = ''
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec)
+
+ def convert_to_pystring(self, cvalue, code, format_spec=None):
+ if format_spec:
+ return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec)
+ # NOTE: no caching here as the string constant cnames depend on the current module
+ utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name()
+ to_pyunicode_utility = TempitaUtilityCode.load_cached(
+ "CBIntToPyUnicode", "TypeConversion.c", context={
+ "TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname,
+ "FALSE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname,
+ "TO_PY_FUNCTION": utility_code_name,
+ })
+ code.globalstate.use_utility_code(to_pyunicode_utility)
+ return "%s(%s)" % (utility_code_name, cvalue)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if for_display:
+ base_code = 'bool'
+ elif pyrex:
+ base_code = 'bint'
+ else:
+ base_code = public_decl('int', dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def __repr__(self):
+ return "<CNumericType bint>"
+
+ def __str__(self):
+ return 'bint'
+
+ def py_type_name(self):
+ return "bool"
+
+
+class CPyUCS4IntType(CIntType):
+ # Py_UCS4
+
+ is_unicode_char = True
+
+ # Py_UCS4 coerces from and to single character unicode strings (or
+ # at most two characters on 16-bit Unicode builds), but we also
+ # allow Python integers as input. The value range for Py_UCS4
+ # is 0..1114111, which is checked when converting from an integer
+ # value.
+
+ to_py_function = "PyUnicode_FromOrdinal"
+ from_py_function = "__Pyx_PyObject_AsPy_UCS4"
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ return False # does the right thing anyway
+
+ def create_from_py_utility_code(self, env):
+ env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
+ return True
+
+ def sign_and_name(self):
+ return "Py_UCS4"
+
+
+class CPyUnicodeIntType(CIntType):
+ # Py_UNICODE
+
+ is_unicode_char = True
+
+ # Py_UNICODE coerces from and to single character unicode strings,
+ # but we also allow Python integers as input. The value range for
+ # Py_UNICODE is 0..1114111, which is checked when converting from
+ # an integer value.
+
+ to_py_function = "PyUnicode_FromOrdinal"
+ from_py_function = "__Pyx_PyObject_AsPy_UNICODE"
+
+ def can_coerce_to_pystring(self, env, format_spec=None):
+ return False # does the right thing anyway
+
+ def create_from_py_utility_code(self, env):
+ env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
+ return True
+
+ def sign_and_name(self):
+ return "Py_UNICODE"
+
+
+class CPyHashTType(CIntType):
+
+ to_py_function = "__Pyx_PyInt_FromHash_t"
+ from_py_function = "__Pyx_PyInt_AsHash_t"
+
+ def sign_and_name(self):
+ return "Py_hash_t"
+
+class CPySSizeTType(CIntType):
+
+ to_py_function = "PyInt_FromSsize_t"
+ from_py_function = "__Pyx_PyIndex_AsSsize_t"
+
+ def sign_and_name(self):
+ return "Py_ssize_t"
+
+class CSSizeTType(CIntType):
+
+ to_py_function = "PyInt_FromSsize_t"
+ from_py_function = "PyInt_AsSsize_t"
+
+ def sign_and_name(self):
+ return "Py_ssize_t"
+
+class CSizeTType(CIntType):
+
+ to_py_function = "__Pyx_PyInt_FromSize_t"
+
+ def sign_and_name(self):
+ return "size_t"
+
+class CPtrdiffTType(CIntType):
+
+ def sign_and_name(self):
+ return "ptrdiff_t"
+
+
+class CFloatType(CNumericType):
+
+ is_float = 1
+ to_py_function = "PyFloat_FromDouble"
+ from_py_function = "__pyx_PyFloat_AsDouble"
+
+ exception_value = -1
+
+ def __init__(self, rank, math_h_modifier = ''):
+ CNumericType.__init__(self, rank, 1)
+ self.math_h_modifier = math_h_modifier
+ if rank == RANK_FLOAT:
+ self.from_py_function = "__pyx_PyFloat_AsFloat"
+
+ def assignable_from_resolved_type(self, src_type):
+ return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
+
+ def invalid_value(self):
+ return Naming.PYX_NAN
+
+class CComplexType(CNumericType):
+
+ is_complex = 1
+ to_py_function = "__pyx_PyComplex_FromComplex"
+ has_attributes = 1
+ scope = None
+
+ def __init__(self, real_type):
+ while real_type.is_typedef and not real_type.typedef_is_external:
+ real_type = real_type.typedef_base_type
+ self.funcsuffix = "_%s" % real_type.specialization_name()
+ if real_type.is_float:
+ self.math_h_modifier = real_type.math_h_modifier
+ else:
+ self.math_h_modifier = "_UNUSED"
+
+ self.real_type = real_type
+ CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
+ self.binops = {}
+ self.from_parts = "%s_from_parts" % self.specialization_name()
+ self.default_value = "%s(0, 0)" % self.from_parts
+
+ def __eq__(self, other):
+ if isinstance(self, CComplexType) and isinstance(other, CComplexType):
+ return self.real_type == other.real_type
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(self, CComplexType) and isinstance(other, CComplexType):
+ return self.real_type != other.real_type
+ else:
+ return True
+
+ def __lt__(self, other):
+ if isinstance(self, CComplexType) and isinstance(other, CComplexType):
+ return self.real_type < other.real_type
+ else:
+ # this is arbitrary, but it makes sure we always have
+ # *some* kind of order
+ return False
+
+ def __hash__(self):
+ return ~hash(self.real_type)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
+ base_code = "%s complex" % real_code
+ else:
+ base_code = public_decl(self.sign_and_name(), dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def sign_and_name(self):
+ real_type_name = self.real_type.specialization_name()
+ real_type_name = real_type_name.replace('long__double', 'long_double')
+ real_type_name = real_type_name.replace('PY_LONG_LONG', 'long_long')
+ return Naming.type_prefix + real_type_name + "_complex"
+
+ def assignable_from(self, src_type):
+ # Temporary hack/feature disabling, see #441
+ if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
+ and src_type.typedef_is_external):
+ return False
+ elif src_type.is_pyobject:
+ return True
+ else:
+ return super(CComplexType, self).assignable_from(src_type)
+
+ def assignable_from_resolved_type(self, src_type):
+ return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
+ or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
+ or src_type is error_type)
+
+ def attributes_known(self):
+ if self.scope is None:
+ from . import Symtab
+ self.scope = scope = Symtab.CClassScope(
+ '',
+ None,
+ visibility="extern")
+ scope.parent_type = self
+ scope.directives = {}
+ scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
+ scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
+ scope.declare_cfunction(
+ "conjugate",
+ CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
+ pos=None,
+ defining=1,
+ cname="__Pyx_c_conj%s" % self.funcsuffix)
+
+ return True
+
+ def _utility_code_context(self):
+ return {
+ 'type': self.empty_declaration_code(),
+ 'type_name': self.specialization_name(),
+ 'real_type': self.real_type.empty_declaration_code(),
+ 'func_suffix': self.funcsuffix,
+ 'm': self.math_h_modifier,
+ 'is_float': int(self.real_type.is_float)
+ }
+
+ def create_declaration_utility_code(self, env):
+ # This must always be run, because a single CComplexType instance can be shared
+ # across multiple compilations (the one created in the module scope)
+ env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c'))
+ env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c'))
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ 'Declarations', 'Complex.c', self._utility_code_context()))
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ 'Arithmetic', 'Complex.c', self._utility_code_context()))
+ return True
+
+ def can_coerce_to_pyobject(self, env):
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def create_to_py_utility_code(self, env):
+ env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c'))
+ return True
+
+ def create_from_py_utility_code(self, env):
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ 'FromPy', 'Complex.c', self._utility_code_context()))
+ self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
+ return True
+
+ def lookup_op(self, nargs, op):
+ try:
+ return self.binops[nargs, op]
+ except KeyError:
+ pass
+ try:
+ op_name = complex_ops[nargs, op]
+ self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
+ return func_name
+ except KeyError:
+ return None
+
+ def unary_op(self, op):
+ return self.lookup_op(1, op)
+
+ def binary_op(self, op):
+ return self.lookup_op(2, op)
+
+ def py_type_name(self):
+ return "complex"
+
+ def cast_code(self, expr_code):
+ return expr_code
+
+complex_ops = {
+ (1, '-'): 'neg',
+ (1, 'zero'): 'is_zero',
+ (2, '+'): 'sum',
+ (2, '-'): 'diff',
+ (2, '*'): 'prod',
+ (2, '/'): 'quot',
+ (2, '**'): 'pow',
+ (2, '=='): 'eq',
+}
+
+
+class CPyTSSTType(CType):
+ #
+ # PEP-539 "Py_tss_t" type
+ #
+
+ declaration_value = "Py_tss_NEEDS_INIT"
+
+ def __repr__(self):
+ return "<Py_tss_t>"
+
+ def declaration_code(self, entity_code,
+ for_display=0, dll_linkage=None, pyrex=0):
+ if pyrex or for_display:
+ base_code = "Py_tss_t"
+ else:
+ base_code = public_decl("Py_tss_t", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+
+class CPointerBaseType(CType):
+ # common base type for pointer/array types
+ #
+ # base_type CType Reference type
+
+ subtypes = ['base_type']
+
+ def __init__(self, base_type):
+ self.base_type = base_type
+ if base_type.is_const:
+ base_type = base_type.const_base_type
+ for char_type in (c_char_type, c_uchar_type, c_schar_type):
+ if base_type.same_as(char_type):
+ self.is_string = 1
+ break
+ else:
+ if base_type.same_as(c_py_unicode_type):
+ self.is_pyunicode_ptr = 1
+
+ if self.is_string and not base_type.is_error:
+ if base_type.signed == 2:
+ self.to_py_function = "__Pyx_PyObject_FromCString"
+ if self.is_ptr:
+ self.from_py_function = "__Pyx_PyObject_As%sSString"
+ elif base_type.signed:
+ self.to_py_function = "__Pyx_PyObject_FromString"
+ if self.is_ptr:
+ self.from_py_function = "__Pyx_PyObject_As%sString"
+ else:
+ self.to_py_function = "__Pyx_PyObject_FromCString"
+ if self.is_ptr:
+ self.from_py_function = "__Pyx_PyObject_As%sUString"
+ if self.is_ptr:
+ self.from_py_function %= '' if self.base_type.is_const else 'Writable'
+ self.exception_value = "NULL"
+ elif self.is_pyunicode_ptr and not base_type.is_error:
+ self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
+ if self.is_ptr:
+ self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
+ self.exception_value = "NULL"
+
+ def py_type_name(self):
+ if self.is_string:
+ return "bytes"
+ elif self.is_pyunicode_ptr:
+ return "unicode"
+ else:
+ return super(CPointerBaseType, self).py_type_name()
+
+ def literal_code(self, value):
+ if self.is_string:
+ assert isinstance(value, str)
+ return '"%s"' % StringEncoding.escape_byte_string(value)
+
+
+class CArrayType(CPointerBaseType):
+ # base_type CType Element type
+ # size integer or None Number of elements
+
+ is_array = 1
+ to_tuple_function = None
+
+ def __init__(self, base_type, size):
+ super(CArrayType, self).__init__(base_type)
+ self.size = size
+
+ def __eq__(self, other):
+ if isinstance(other, CType) and other.is_array and self.size == other.size:
+ return self.base_type.same_as(other.base_type)
+ return False
+
+ def __hash__(self):
+ return hash(self.base_type) + 28 # arbitrarily chosen offset
+
+ def __repr__(self):
+ return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
+
+ def same_as_resolved_type(self, other_type):
+ return ((other_type.is_array and
+ self.base_type.same_as(other_type.base_type))
+ or other_type is error_type)
+
+ def assignable_from_resolved_type(self, src_type):
+ # C arrays are assigned by value, either Python containers or C arrays/pointers
+ if src_type.is_pyobject:
+ return True
+ if src_type.is_ptr or src_type.is_array:
+ return self.base_type.assignable_from(src_type.base_type)
+ return False
+
+ def element_ptr_type(self):
+ return c_ptr_type(self.base_type)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if self.size is not None:
+ dimension_code = self.size
+ else:
+ dimension_code = ""
+ if entity_code.startswith("*"):
+ entity_code = "(%s)" % entity_code
+ return self.base_type.declaration_code(
+ "%s[%s]" % (entity_code, dimension_code),
+ for_display, dll_linkage, pyrex)
+
+ def as_argument_type(self):
+ return c_ptr_type(self.base_type)
+
+ def is_complete(self):
+ return self.size is not None
+
+ def specialize(self, values):
+ base_type = self.base_type.specialize(values)
+ if base_type == self.base_type:
+ return self
+ else:
+ return CArrayType(base_type, self.size)
+
+ def deduce_template_params(self, actual):
+ if isinstance(actual, CArrayType):
+ return self.base_type.deduce_template_params(actual.base_type)
+ else:
+ return {}
+
+ def can_coerce_to_pyobject(self, env):
+ return self.base_type.can_coerce_to_pyobject(env)
+
+ def can_coerce_from_pyobject(self, env):
+ return self.base_type.can_coerce_from_pyobject(env)
+
+ def create_to_py_utility_code(self, env):
+ if self.to_py_function is not None:
+ return self.to_py_function
+ if not self.base_type.create_to_py_utility_code(env):
+ return False
+
+ safe_typename = self.base_type.specialization_name()
+ to_py_function = "__Pyx_carray_to_py_%s" % safe_typename
+ to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename
+
+ from .UtilityCode import CythonUtilityCode
+ context = {
+ 'cname': to_py_function,
+ 'to_tuple_cname': to_tuple_function,
+ 'base_type': self.base_type,
+ }
+ env.use_utility_code(CythonUtilityCode.load(
+ "carray.to_py", "CConvert.pyx",
+ outer_module_scope=env.global_scope(), # need access to types declared in module
+ context=context, compiler_directives=dict(env.global_scope().directives)))
+ self.to_tuple_function = to_tuple_function
+ self.to_py_function = to_py_function
+ return True
+
+ def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
+ func = self.to_py_function if to_py_function is None else to_py_function
+ if self.is_string or self.is_pyunicode_ptr:
+ return '%s = %s(%s)' % (
+ result_code,
+ func,
+ source_code)
+ target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple'
+ return '%s = %s(%s, %s)' % (
+ result_code,
+ self.to_tuple_function if target_is_tuple else func,
+ source_code,
+ self.size)
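+ # For illustration: converting 'int arr[3]' to a Python list emits
+ # "target = __Pyx_carray_to_py_int(arr, 3)"; a tuple target calls
+ # "__Pyx_carray_to_tuple_int" instead.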
+
+ def create_from_py_utility_code(self, env):
+ if self.from_py_function is not None:
+ return self.from_py_function
+ if not self.base_type.create_from_py_utility_code(env):
+ return False
+
+ from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name()
+
+ from .UtilityCode import CythonUtilityCode
+ context = {
+ 'cname': from_py_function,
+ 'base_type': self.base_type,
+ }
+ env.use_utility_code(CythonUtilityCode.load(
+ "carray.from_py", "CConvert.pyx",
+ outer_module_scope=env.global_scope(), # need access to types declared in module
+ context=context, compiler_directives=dict(env.global_scope().directives)))
+ self.from_py_function = from_py_function
+ return True
+
+ def from_py_call_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None):
+ assert not error_condition, '%s: %s' % (error_pos, error_condition)
+ call_code = "%s(%s, %s, %s)" % (
+ from_py_function or self.from_py_function,
+ source_code, result_code, self.size)
+ return code.error_goto_if_neg(call_code, error_pos)
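+ # For illustration: for 'double arr[4]' this emits
+ # "__Pyx_carray_from_py_double(obj, arr, 4)" wrapped in an error-goto
+ # check by code.error_goto_if_neg().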
+
+
+class CPtrType(CPointerBaseType):
+ # base_type CType Reference type
+
+ is_ptr = 1
+ default_value = "0"
+
+ def __hash__(self):
+ return hash(self.base_type) + 27 # arbitrarily chosen offset
+
+ def __eq__(self, other):
+ if isinstance(other, CType) and other.is_ptr:
+ return self.base_type.same_as(other.base_type)
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "<CPtrType %s>" % repr(self.base_type)
+
+ def same_as_resolved_type(self, other_type):
+ return ((other_type.is_ptr and
+ self.base_type.same_as(other_type.base_type))
+ or other_type is error_type)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ #print "CPtrType.declaration_code: pointer to", self.base_type ###
+ return self.base_type.declaration_code(
+ "*%s" % entity_code,
+ for_display, dll_linkage, pyrex)
+
+ def assignable_from_resolved_type(self, other_type):
+ if other_type is error_type:
+ return 1
+ if other_type.is_null_ptr:
+ return 1
+ if self.base_type.is_const:
+ self = CPtrType(self.base_type.const_base_type)
+ if self.base_type.is_cfunction:
+ if other_type.is_ptr:
+ other_type = other_type.base_type.resolve()
+ if other_type.is_cfunction:
+ return self.base_type.pointer_assignable_from_resolved_type(other_type)
+ else:
+ return 0
+ if (self.base_type.is_cpp_class and other_type.is_ptr
+ and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
+ return 1
+ if other_type.is_array or other_type.is_ptr:
+ return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
+ return 0
+
+ def specialize(self, values):
+ base_type = self.base_type.specialize(values)
+ if base_type == self.base_type:
+ return self
+ else:
+ return CPtrType(base_type)
+
+ def deduce_template_params(self, actual):
+ if isinstance(actual, CPtrType):
+ return self.base_type.deduce_template_params(actual.base_type)
+ else:
+ return {}
+
+ def invalid_value(self):
+ return "1"
+
+ def find_cpp_operation_type(self, operator, operand_type=None):
+ if self.base_type.is_cpp_class:
+ return self.base_type.find_cpp_operation_type(operator, operand_type)
+ return None
+
+
+class CNullPtrType(CPtrType):
+
+ is_null_ptr = 1
+
+
+class CReferenceType(BaseType):
+
+ is_reference = 1
+ is_fake_reference = 0
+
+ def __init__(self, base_type):
+ self.ref_base_type = base_type
+
+ def __repr__(self):
+ return "<CReferenceType %s>" % repr(self.ref_base_type)
+
+ def __str__(self):
+ return "%s &" % self.ref_base_type
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ #print "CReferenceType.declaration_code: pointer to", self.base_type ###
+ return self.ref_base_type.declaration_code(
+ "&%s" % entity_code,
+ for_display, dll_linkage, pyrex)
+
+ def specialize(self, values):
+ base_type = self.ref_base_type.specialize(values)
+ if base_type == self.ref_base_type:
+ return self
+ else:
+ return type(self)(base_type)
+
+ def deduce_template_params(self, actual):
+ return self.ref_base_type.deduce_template_params(actual)
+
+ def __getattr__(self, name):
+ return getattr(self.ref_base_type, name)
+
+
+class CFakeReferenceType(CReferenceType):
+
+ is_fake_reference = 1
+
+ def __repr__(self):
+ return "<CFakeReferenceType %s>" % repr(self.ref_base_type)
+
+ def __str__(self):
+ return "%s [&]" % self.ref_base_type
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ #print "CReferenceType.declaration_code: pointer to", self.base_type ###
+ return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code)
+
+
+class CFuncType(CType):
+ # return_type CType
+ # args [CFuncTypeArg]
+ # has_varargs boolean
+ # exception_value string
+ # exception_check boolean True if PyErr_Occurred check needed
+ # calling_convention string Function calling convention
+ # nogil boolean Can be called without gil
+ # with_gil boolean Acquire gil around function body
+ # templates [string] or None
+ # cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd
+ # from_fused boolean Indicates whether this is a specialized
+ # C function
+ # is_strict_signature boolean function refuses to accept coerced arguments
+ # (used for optimisation overrides)
+ # is_const_method boolean
+ # is_static_method boolean
+
+ is_cfunction = 1
+ original_sig = None
+ cached_specialized_types = None
+ from_fused = False
+ is_const_method = False
+
+ subtypes = ['return_type', 'args']
+
+ def __init__(self, return_type, args, has_varargs = 0,
+ exception_value = None, exception_check = 0, calling_convention = "",
+ nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
+ is_const_method = False, is_static_method=False,
+ templates = None, is_strict_signature = False):
+ self.return_type = return_type
+ self.args = args
+ self.has_varargs = has_varargs
+ self.optional_arg_count = optional_arg_count
+ self.exception_value = exception_value
+ self.exception_check = exception_check
+ self.calling_convention = calling_convention
+ self.nogil = nogil
+ self.with_gil = with_gil
+ self.is_overridable = is_overridable
+ self.is_const_method = is_const_method
+ self.is_static_method = is_static_method
+ self.templates = templates
+ self.is_strict_signature = is_strict_signature
+
+ def __repr__(self):
+ arg_reprs = list(map(repr, self.args))
+ if self.has_varargs:
+ arg_reprs.append("...")
+ if self.exception_value:
+ except_clause = " %r" % self.exception_value
+ else:
+ except_clause = ""
+ if self.exception_check:
+ except_clause += "?"
+ return "<CFuncType %s %s[%s]%s>" % (
+ repr(self.return_type),
+ self.calling_convention_prefix(),
+ ",".join(arg_reprs),
+ except_clause)
+
+ def with_with_gil(self, with_gil):
+ if with_gil == self.with_gil:
+ return self
+ else:
+ return CFuncType(
+ self.return_type, self.args, self.has_varargs,
+ self.exception_value, self.exception_check,
+ self.calling_convention, self.nogil,
+ with_gil,
+ self.is_overridable, self.optional_arg_count,
+ self.is_const_method, self.is_static_method,
+ self.templates, self.is_strict_signature)
+
+ def calling_convention_prefix(self):
+ cc = self.calling_convention
+ if cc:
+ return cc + " "
+ else:
+ return ""
+
+ def as_argument_type(self):
+ return c_ptr_type(self)
+
+ def same_c_signature_as(self, other_type, as_cmethod = 0):
+ return self.same_c_signature_as_resolved_type(
+ other_type.resolve(), as_cmethod)
+
+ def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
+ exact_semantics=True):
+ # If 'exact_semantics' is false, accept any equivalent C signature
+ # as long as the Cython semantics are compatible, i.e. the same as or
+ # wider than those of 'other_type'.
+
+ #print "CFuncType.same_c_signature_as_resolved_type:", \
+ # self, other_type, "as_cmethod =", as_cmethod ###
+ if other_type is error_type:
+ return 1
+ if not other_type.is_cfunction:
+ return 0
+ if self.is_overridable != other_type.is_overridable:
+ return 0
+ nargs = len(self.args)
+ if nargs != len(other_type.args):
+ return 0
+ # When comparing C method signatures, the first argument
+ # is exempt from compatibility checking (the proper check
+ # is performed elsewhere).
+ for i in range(as_cmethod, nargs):
+ if not self.args[i].type.same_as(other_type.args[i].type):
+ return 0
+ if self.has_varargs != other_type.has_varargs:
+ return 0
+ if self.optional_arg_count != other_type.optional_arg_count:
+ return 0
+ if as_pxd_definition:
+ # A narrowing of the return type declared in the pxd is allowed.
+ if not self.return_type.subtype_of_resolved_type(other_type.return_type):
+ return 0
+ else:
+ if not self.return_type.same_as(other_type.return_type):
+ return 0
+ if not self.same_calling_convention_as(other_type):
+ return 0
+ if exact_semantics:
+ if self.exception_check != other_type.exception_check:
+ return 0
+ if not self._same_exception_value(other_type.exception_value):
+ return 0
+ elif not self._is_exception_compatible_with(other_type):
+ return 0
+ return 1
+
+ def _same_exception_value(self, other_exc_value):
+ if self.exception_value == other_exc_value:
+ return 1
+ if self.exception_check != '+':
+ return 0
+ if not self.exception_value or not other_exc_value:
+ return 0
+ if self.exception_value.type != other_exc_value.type:
+ return 0
+ if self.exception_value.entry and other_exc_value.entry:
+ if self.exception_value.entry.cname != other_exc_value.entry.cname:
+ return 0
+ if self.exception_value.name != other_exc_value.name:
+ return 0
+ return 1
+
+ def compatible_signature_with(self, other_type, as_cmethod = 0):
+ return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
+
+ def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
+ #print "CFuncType.same_c_signature_as_resolved_type:", \
+ # self, other_type, "as_cmethod =", as_cmethod ###
+ if other_type is error_type:
+ return 1
+ if not other_type.is_cfunction:
+ return 0
+ if not self.is_overridable and other_type.is_overridable:
+ return 0
+ nargs = len(self.args)
+ if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
+ return 0
+ if self.optional_arg_count < other_type.optional_arg_count:
+ return 0
+ # When comparing C method signatures, the first argument
+ # is exempt from compatibility checking (the proper check
+ # is performed elsewhere).
+ for i in range(as_cmethod, len(other_type.args)):
+ if not self.args[i].type.same_as(
+ other_type.args[i].type):
+ return 0
+ if self.has_varargs != other_type.has_varargs:
+ return 0
+ if not self.return_type.subtype_of_resolved_type(other_type.return_type):
+ return 0
+ if not self.same_calling_convention_as(other_type):
+ return 0
+ if self.nogil != other_type.nogil:
+ return 0
+ if not self._is_exception_compatible_with(other_type):
+ return 0
+ self.original_sig = other_type.original_sig or other_type
+ return 1
+
+ def _is_exception_compatible_with(self, other_type):
+ # narrower exception checks are ok, but prevent mismatches
+ if self.exception_check == '+' and other_type.exception_check != '+':
+ # must catch C++ exceptions if we raise them
+ return 0
+ if not other_type.exception_check or other_type.exception_value is not None:
+ # if other does not *always* check exceptions, self must comply
+ if not self._same_exception_value(other_type.exception_value):
+ return 0
+ if self.exception_check and self.exception_check != other_type.exception_check:
+ # a redundant exception check doesn't make functions incompatible, but a missing one does
+ return 0
+ return 1
+
+ def narrower_c_signature_than(self, other_type, as_cmethod = 0):
+ return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
+
+ def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
+ if other_type is error_type:
+ return 1
+ if not other_type.is_cfunction:
+ return 0
+ nargs = len(self.args)
+ if nargs != len(other_type.args):
+ return 0
+ for i in range(as_cmethod, nargs):
+ if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
+ return 0
+ else:
+ self.args[i].needs_type_test = other_type.args[i].needs_type_test \
+ or not self.args[i].type.same_as(other_type.args[i].type)
+ if self.has_varargs != other_type.has_varargs:
+ return 0
+ if self.optional_arg_count != other_type.optional_arg_count:
+ return 0
+ if not self.return_type.subtype_of_resolved_type(other_type.return_type):
+ return 0
+ if not self.exception_check and other_type.exception_check:
+ # a redundant exception check doesn't make functions incompatible, but a missing one does
+ return 0
+ if not self._same_exception_value(other_type.exception_value):
+ return 0
+ return 1
+
+ def same_calling_convention_as(self, other):
+ ## XXX Under discussion ...
+ ## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
+ ## cs1 = self.calling_convention
+ ## cs2 = other.calling_convention
+ ## if (cs1 in callspec_words or
+ ## cs2 in callspec_words):
+ ## return cs1 == cs2
+ ## else:
+ ## return True
+ sc1 = self.calling_convention == '__stdcall'
+ sc2 = other.calling_convention == '__stdcall'
+ return sc1 == sc2
+
+ def same_as_resolved_type(self, other_type, as_cmethod=False):
+ return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
+ and self.nogil == other_type.nogil
+
+ def pointer_assignable_from_resolved_type(self, rhs_type):
+ # Accept compatible exception/nogil declarations for the RHS.
+ if rhs_type is error_type:
+ return 1
+ if not rhs_type.is_cfunction:
+ return 0
+ return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \
+ and not (self.nogil and not rhs_type.nogil)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0,
+ with_calling_convention = 1):
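+ # A rough sketch of the output: for a C function returning int with a
+ # single double argument, declaration_code("f") yields "int f(double)";
+ # in pyrex/display mode an exception/nogil trailer such as
+ # " except? -1 nogil" is appended as computed below.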
+ arg_decl_list = []
+ for arg in self.args[:len(self.args)-self.optional_arg_count]:
+ arg_decl_list.append(
+ arg.type.declaration_code("", for_display, pyrex = pyrex))
+ if self.is_overridable:
+ arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
+ if self.optional_arg_count:
+ arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ if self.has_varargs:
+ arg_decl_list.append("...")
+ arg_decl_code = ", ".join(arg_decl_list)
+ if not arg_decl_code and not pyrex:
+ arg_decl_code = "void"
+ trailer = ""
+ if (pyrex or for_display) and not self.return_type.is_pyobject:
+ if self.exception_value and self.exception_check:
+ trailer = " except? %s" % self.exception_value
+ elif self.exception_value:
+ trailer = " except %s" % self.exception_value
+ elif self.exception_check == '+':
+ trailer = " except +"
+ elif self.exception_check and for_display:
+ # not spelled out by default, unless for human eyes
+ trailer = " except *"
+ if self.nogil:
+ trailer += " nogil"
+ if not with_calling_convention:
+ cc = ''
+ else:
+ cc = self.calling_convention_prefix()
+ if (not entity_code and cc) or entity_code.startswith("*"):
+ entity_code = "(%s%s)" % (cc, entity_code)
+ cc = ""
+ if self.is_const_method:
+ trailer += " const"
+ return self.return_type.declaration_code(
+ "%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
+ for_display, dll_linkage, pyrex)
+
+ def function_header_code(self, func_name, arg_code):
+ if self.is_const_method:
+ trailer = " const"
+ else:
+ trailer = ""
+ return "%s%s(%s)%s" % (self.calling_convention_prefix(),
+ func_name, arg_code, trailer)
+
+ def signature_string(self):
+ s = self.empty_declaration_code()
+ return s
+
+ def signature_cast_string(self):
+ s = self.declaration_code("(*)", with_calling_convention=False)
+ return '(%s)' % s
+
+ def specialize(self, values):
+ result = CFuncType(self.return_type.specialize(values),
+ [arg.specialize(values) for arg in self.args],
+ has_varargs = self.has_varargs,
+ exception_value = self.exception_value,
+ exception_check = self.exception_check,
+ calling_convention = self.calling_convention,
+ nogil = self.nogil,
+ with_gil = self.with_gil,
+ is_overridable = self.is_overridable,
+ optional_arg_count = self.optional_arg_count,
+ is_const_method = self.is_const_method,
+ is_static_method = self.is_static_method,
+ templates = self.templates)
+
+ result.from_fused = self.is_fused
+ return result
+
+ def opt_arg_cname(self, arg_name):
+ return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
+
+ # Methods that deal with Fused Types
+ # All but map_with_specific_entries should be called only on functions
+ # with fused types (and not on their corresponding specific versions).
+
+ def get_all_specialized_permutations(self, fused_types=None):
+ """
+ Permute all the types. For every specific instance of a fused type, we
+ want all other specific instances of all other fused types.
+
+ It returns an iterable of two-tuples of the cname that should prefix
+ the cname of the function, and a dict mapping any fused types to their
+ respective specific types.
+ """
+ assert self.is_fused
+
+ if fused_types is None:
+ fused_types = self.get_fused_types()
+
+ return get_all_specialized_permutations(fused_types)
+
+ def get_all_specialized_function_types(self):
+ """
+ Get all the specific function types of this one.
+ """
+ assert self.is_fused
+
+ if self.entry.fused_cfunction:
+ return [n.type for n in self.entry.fused_cfunction.nodes]
+ elif self.cached_specialized_types is not None:
+ return self.cached_specialized_types
+
+ result = []
+ permutations = self.get_all_specialized_permutations()
+
+ new_cfunc_entries = []
+ for cname, fused_to_specific in permutations:
+ new_func_type = self.entry.type.specialize(fused_to_specific)
+
+ if self.optional_arg_count:
+ # Remember, this method is set by CFuncDeclaratorNode
+ self.declare_opt_arg_struct(new_func_type, cname)
+
+ new_entry = copy.deepcopy(self.entry)
+ new_func_type.specialize_entry(new_entry, cname)
+
+ new_entry.type = new_func_type
+ new_func_type.entry = new_entry
+ result.append(new_func_type)
+
+ new_cfunc_entries.append(new_entry)
+
+ cfunc_entries = self.entry.scope.cfunc_entries
+ try:
+ cindex = cfunc_entries.index(self.entry)
+ except ValueError:
+ cfunc_entries.extend(new_cfunc_entries)
+ else:
+ cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+
+ self.cached_specialized_types = result
+
+ return result
+
+ def get_fused_types(self, result=None, seen=None, subtypes=None):
+ """Return fused types in the order they appear as parameter types"""
+ return super(CFuncType, self).get_fused_types(result, seen,
+ subtypes=['args'])
+
+ def specialize_entry(self, entry, cname):
+ assert not self.is_fused
+ specialize_entry(entry, cname)
+
+ def can_coerce_to_pyobject(self, env):
+ # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code
+ if self.has_varargs or self.optional_arg_count:
+ return False
+ if self.to_py_function is not None:
+ return self.to_py_function
+ for arg in self.args:
+ if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env):
+ return False
+ if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env):
+ return False
+ return True
+
+ def create_to_py_utility_code(self, env):
+ # FIXME: it seems we're trying to coerce in more cases than we should
+ if self.to_py_function is not None:
+ return self.to_py_function
+ if not self.can_coerce_to_pyobject(env):
+ return False
+ from .UtilityCode import CythonUtilityCode
+ safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1))
+ to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename
+
+ for arg in self.args:
+ if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env):
+ return False
+ if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env):
+ return False
+
+ def declared_type(ctype):
+ type_displayname = str(ctype.declaration_code("", for_display=True))
+ if ctype.is_pyobject:
+ arg_ctype = type_name = type_displayname
+ if ctype.is_builtin_type:
+ arg_ctype = ctype.name
+ elif not ctype.is_extension_type:
+ type_name = 'object'
+ type_displayname = None
+ else:
+ type_displayname = repr(type_displayname)
+ elif ctype is c_bint_type:
+ type_name = arg_ctype = 'bint'
+ else:
+ type_name = arg_ctype = type_displayname
+ if ctype is c_double_type:
+ type_displayname = 'float'
+ else:
+ type_displayname = repr(type_displayname)
+ return type_name, arg_ctype, type_displayname
+
+ class Arg(object):
+ def __init__(self, arg_name, arg_type):
+ self.name = arg_name
+ self.type = arg_type
+ self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type)
+
+ if self.return_type.is_void:
+ except_clause = 'except *'
+ elif self.return_type.is_pyobject:
+ except_clause = ''
+ elif self.exception_value:
+ except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value
+ else:
+ except_clause = 'except *'
+
+ context = {
+ 'cname': to_py_function,
+ 'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)],
+ 'return_type': Arg('return', self.return_type),
+ 'except_clause': except_clause,
+ }
+ # FIXME: directives come from first defining environment and do not adapt for reuse
+ env.use_utility_code(CythonUtilityCode.load(
+ "cfunc.to_py", "CConvert.pyx",
+ outer_module_scope=env.global_scope(), # need access to types declared in module
+ context=context, compiler_directives=dict(env.global_scope().directives)))
+ self.to_py_function = to_py_function
+ return True
+
+
+def specialize_entry(entry, cname):
+ """
+ Specialize an entry of a copied fused function or method
+ """
+ entry.is_fused_specialized = True
+ entry.name = get_fused_cname(cname, entry.name)
+
+ if entry.is_cmethod:
+ entry.cname = entry.name
+ if entry.is_inherited:
+ entry.cname = StringEncoding.EncodedString(
+ "%s.%s" % (Naming.obj_base_cname, entry.cname))
+ else:
+ entry.cname = get_fused_cname(cname, entry.cname)
+
+ if entry.func_cname:
+ entry.func_cname = get_fused_cname(cname, entry.func_cname)
+
+def get_fused_cname(fused_cname, orig_cname):
+ """
+ Given the fused cname id and an original cname, return a specialized cname
+ """
+ assert fused_cname and orig_cname
+ return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
+ fused_cname, orig_cname))
+
+def unique(somelist):
+ seen = set()
+ result = []
+ for obj in somelist:
+ if obj not in seen:
+ result.append(obj)
+ seen.add(obj)
+
+ return result
+
+def get_all_specialized_permutations(fused_types):
+ return _get_all_specialized_permutations(unique(fused_types))
+
+def _get_all_specialized_permutations(fused_types, id="", f2s=()):
+ fused_type, = fused_types[0].get_fused_types()
+ result = []
+
+ for newid, specific_type in enumerate(fused_type.types):
+ # f2s = dict(f2s, **{ fused_type: specific_type })
+ f2s = dict(f2s)
+ f2s.update({ fused_type: specific_type })
+
+ if id:
+ cname = '%s_%s' % (id, newid)
+ else:
+ cname = str(newid)
+
+ if len(fused_types) > 1:
+ result.extend(_get_all_specialized_permutations(
+ fused_types[1:], cname, f2s))
+ else:
+ result.append((cname, f2s))
+
+ return result
+
+def specialization_signature_string(fused_compound_type, fused_to_specific):
+ """
+ Return the signature for a specialization of a fused type. e.g.
+
+ floating[:] ->
+ 'float' or 'double'
+
+ cdef fused ft:
+ float[:]
+ double[:]
+
+ ft ->
+ 'float[:]' or 'double[:]'
+
+ integral func(floating) ->
+ 'int (*func)(float)' or ...
+ """
+ fused_types = fused_compound_type.get_fused_types()
+ if len(fused_types) == 1:
+ fused_type = fused_types[0]
+ else:
+ fused_type = fused_compound_type
+
+ return fused_type.specialize(fused_to_specific).typeof_name()
+
+
+def get_specialized_types(type):
+ """
+ Return a list of specialized types in their declared order.
+ """
+ assert type.is_fused
+
+ if isinstance(type, FusedType):
+ result = list(type.types)
+ for specialized_type in result:
+ specialized_type.specialization_string = specialized_type.typeof_name()
+ else:
+ result = []
+ for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
+ specialized_type = type.specialize(f2s)
+ specialized_type.specialization_string = (
+ specialization_signature_string(type, f2s))
+ result.append(specialized_type)
+
+ return result
+
+
+class CFuncTypeArg(BaseType):
+ # name string
+ # cname string
+ # type PyrexType
+ # pos source file position
+
+ # FIXME: is this the right setup? should None be allowed here?
+ not_none = False
+ or_none = False
+ accept_none = True
+ accept_builtin_subtypes = False
+ annotation = None
+
+ subtypes = ['type']
+
+ def __init__(self, name, type, pos, cname=None, annotation=None):
+ self.name = name
+ if cname is not None:
+ self.cname = cname
+ else:
+ self.cname = Naming.var_prefix + name
+ if annotation is not None:
+ self.annotation = annotation
+ self.type = type
+ self.pos = pos
+ self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
+
+ def __repr__(self):
+ return "%s:%s" % (self.name, repr(self.type))
+
+ def declaration_code(self, for_display = 0):
+ return self.type.declaration_code(self.cname, for_display)
+
+ def specialize(self, values):
+ return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
+
+
+class ToPyStructUtilityCode(object):
+
+ requires = None
+
+ def __init__(self, type, forward_decl, env):
+ self.type = type
+ self.header = "static PyObject* %s(%s)" % (type.to_py_function,
+ type.declaration_code('s'))
+ self.forward_decl = forward_decl
+ self.env = env
+
+ def __eq__(self, other):
+ return isinstance(other, ToPyStructUtilityCode) and self.header == other.header
+
+ def __hash__(self):
+ return hash(self.header)
+
+ def get_tree(self, **kwargs):
+ pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto = output['utility_code_proto']
+
+ code.putln("%s {" % self.header)
+ code.putln("PyObject* res;")
+ code.putln("PyObject* member;")
+ code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" %
+ len(self.type.scope.var_entries))
+ for member in self.type.scope.var_entries:
+ nameconst_cname = code.get_py_string_const(member.name, identifier=True)
+ code.putln("%s; if (unlikely(!member)) goto bad;" % (
+ member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type)))
+ code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname)
+ code.putln("Py_DECREF(member);")
+ code.putln("return res;")
+ code.putln("bad:")
+ code.putln("Py_XDECREF(member);")
+ code.putln("Py_DECREF(res);")
+ code.putln("return NULL;")
+ code.putln("}")
+
+ # This is a bit of a hack: we need a forward declaration
+ # because of the way things are ordered in the module...
+ if self.forward_decl:
+ proto.putln(self.type.empty_declaration_code() + ';')
+ proto.putln(self.header + ";")
+
+ def inject_tree_and_scope_into(self, module_node):
+ pass
+
+
+class CStructOrUnionType(CType):
+ # name string
+ # cname string
+ # kind string "struct" or "union"
+ # scope StructOrUnionScope, or None if incomplete
+ # typedef_flag boolean
+ # packed boolean
+
+ # entry Entry
+
+ is_struct_or_union = 1
+ has_attributes = 1
+ exception_check = True
+
+ def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
+ self.name = name
+ self.cname = cname
+ self.kind = kind
+ self.scope = scope
+ self.typedef_flag = typedef_flag
+ self.is_struct = kind == 'struct'
+ self.to_py_function = "%s_to_py_%s" % (
+ Naming.convert_func_prefix, self.specialization_name())
+ self.from_py_function = "%s_from_py_%s" % (
+ Naming.convert_func_prefix, self.specialization_name())
+ self.exception_check = True
+ self._convert_to_py_code = None
+ self._convert_from_py_code = None
+ self.packed = packed
+
+ def can_coerce_to_pyobject(self, env):
+ if self._convert_to_py_code is False:
+ return None # tri-state-ish
+
+ if env.outer_scope is None:
+ return False
+
+ if self._convert_to_py_code is None:
+ is_union = not self.is_struct
+ unsafe_union_types = set()
+ safe_union_types = set()
+ for member in self.scope.var_entries:
+ member_type = member.type
+ if not member_type.can_coerce_to_pyobject(env):
+ self.to_py_function = None
+ self._convert_to_py_code = False
+ return False
+ if is_union:
+ if member_type.is_ptr or member_type.is_cpp_class:
+ unsafe_union_types.add(member_type)
+ else:
+ safe_union_types.add(member_type)
+
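+ # Illustrative: 'union { int i; float f; }' remains convertible, while
+ # 'union { void* p; int i; }' mixes pointer and non-pointer members
+ # and is rejected below as unsafe to convert.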
+ if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1):
+ # unsafe mix of safely and unsafely convertible types
+ self.to_py_function = None
+ self._convert_to_py_code = False
+ return False
+
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if not self.can_coerce_to_pyobject(env):
+ return False
+
+ if self._convert_to_py_code is None:
+ for member in self.scope.var_entries:
+ member.type.create_to_py_utility_code(env)
+ forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
+ self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)
+
+ env.use_utility_code(self._convert_to_py_code)
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ if env.outer_scope is None or self._convert_from_py_code is False:
+ return False
+ for member in self.scope.var_entries:
+ if not member.type.can_coerce_from_pyobject(env):
+ return False
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if env.outer_scope is None:
+ return False
+
+ if self._convert_from_py_code is False:
+ return None # tri-state-ish
+
+ if self._convert_from_py_code is None:
+ if not self.scope.var_entries:
+ # There are obviously missing fields; don't allow instantiation
+ # where absolutely no content is provided.
+ return False
+
+ for member in self.scope.var_entries:
+ if not member.type.create_from_py_utility_code(env):
+ self.from_py_function = None
+ self._convert_from_py_code = False
+ return False
+
+ context = dict(
+ struct_type=self,
+ var_entries=self.scope.var_entries,
+ funcname=self.from_py_function,
+ )
+ from .UtilityCode import CythonUtilityCode
+ self._convert_from_py_code = CythonUtilityCode.load(
+ "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility",
+ "CConvert.pyx",
+ outer_module_scope=env.global_scope(), # need access to types declared in module
+ context=context)
+
+ env.use_utility_code(self._convert_from_py_code)
+ return True
+
+ def __repr__(self):
+ return "<CStructOrUnionType %s %s%s>" % (
+ self.name, self.cname,
+ ("", " typedef")[self.typedef_flag])
+
+ def declaration_code(self, entity_code,
+ for_display=0, dll_linkage=None, pyrex=0):
+ if pyrex or for_display:
+ base_code = self.name
+ else:
+ if self.typedef_flag:
+ base_code = self.cname
+ else:
+ base_code = "%s %s" % (self.kind, self.cname)
+ base_code = public_decl(base_code, dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def __eq__(self, other):
+ try:
+ return (isinstance(other, CStructOrUnionType) and
+ self.name == other.name)
+ except AttributeError:
+ return False
+
+ def __lt__(self, other):
+ try:
+ return self.name < other.name
+ except AttributeError:
+ # this is arbitrary, but it makes sure we always have
+ # *some* kind of order
+ return False
+
+ def __hash__(self):
+ return hash(self.cname) ^ hash(self.kind)
+
+ def is_complete(self):
+ return self.scope is not None
+
+ def attributes_known(self):
+ return self.is_complete()
+
+ def can_be_complex(self):
+ # Does the struct consist of exactly two identical floats?
+ fields = self.scope.var_entries
+ if len(fields) != 2: return False
+ a, b = fields
+ return (a.type.is_float and b.type.is_float and
+ a.type.empty_declaration_code() ==
+ b.type.empty_declaration_code())
+
+ def struct_nesting_depth(self):
+ child_depths = [x.type.struct_nesting_depth()
+ for x in self.scope.var_entries]
+ return max(child_depths) + 1
+
+ def cast_code(self, expr_code):
+ if self.is_struct:
+ return expr_code
+ return super(CStructOrUnionType, self).cast_code(expr_code)
+
+cpp_string_conversions = ("std::string", "TString", "TStringBuf")
+
+builtin_cpp_conversions = {
+ # type element template params
+ "std::pair": 2,
+ "std::vector": 1,
+ "std::list": 1,
+ "std::set": 1,
+ "std::unordered_set": 1,
+ "std::map": 2,
+ "std::unordered_map": 2,
+ "std::complex": 1,
+ # arcadia_cpp_conversions
+ "TMaybe": 1,
+ "TVector": 1,
+ "THashMap": 2,
+ "TMap": 2,
+}
+
+class CppClassType(CType):
+ # name string
+ # cname string
+ # scope CppClassScope
+ # templates [string] or None
+
+ is_cpp_class = 1
+ has_attributes = 1
+ exception_check = True
+ namespace = None
+
+ # For struct-like declaration.
+ kind = "struct"
+ packed = False
+ typedef_flag = False
+
+ subtypes = ['templates']
+
+ def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None):
+ self.name = name
+ self.cname = cname
+ self.scope = scope
+ self.base_classes = base_classes
+ self.operators = []
+ self.templates = templates
+ self.template_type = template_type
+ self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ())
+ if templates and False: # https://github.com/cython/cython/issues/1868
+ self.specializations = {tuple(zip(templates, templates)): self}
+ else:
+ self.specializations = {}
+ self.is_cpp_string = cname in cpp_string_conversions
+
+ def use_conversion_utility(self, from_or_to):
+ pass
+
+ def maybe_unordered(self):
+ if 'unordered' in self.cname:
+ return 'unordered_'
+ else:
+ return ''
+
+ def can_coerce_from_pyobject(self, env):
+ if self.cname in builtin_cpp_conversions:
+ template_count = builtin_cpp_conversions[self.cname]
+ for ix, T in enumerate(self.templates or []):
+ if ix >= template_count:
+ break
+ if T.is_pyobject or not T.can_coerce_from_pyobject(env):
+ return False
+ return True
+ elif self.cname in cpp_string_conversions:
+ return True
+ return False
+
+ def create_from_py_utility_code(self, env):
+ if self.from_py_function is not None:
+ return True
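+ # Sketch of the generated converter name (assuming a 'std::vector<int>'
+ # instantiation): cls becomes 'vector', tags ['int'], and the utility
+ # function is registered as '__pyx_convert_vector_from_py_int'.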
+ if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
+ X = "XYZABC"
+ tags = []
+ context = {}
+ for ix, T in enumerate(self.templates or []):
+ if ix >= builtin_cpp_conversions[self.cname]:
+ break
+ if T.is_pyobject or not T.create_from_py_utility_code(env):
+ return False
+ tags.append(T.specialization_name())
+ context[X[ix]] = T
+
+ if self.cname in cpp_string_conversions:
+ cls = 'string'
+ tags = (type_identifier(self),)
+ elif self.cname.startswith('std::'):
+ cls = self.cname[5:]
+ else:
+ cls = 'arcadia_' + self.cname
+ cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags))
+ context.update({
+ 'cname': cname,
+ 'maybe_unordered': self.maybe_unordered(),
+ 'type': self.cname,
+ })
+ from .UtilityCode import CythonUtilityCode
+ env.use_utility_code(CythonUtilityCode.load(
+ cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
+ context=context, compiler_directives=env.directives))
+ self.from_py_function = cname
+ return True
+
+ def can_coerce_to_pyobject(self, env):
+ if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
+ for ix, T in enumerate(self.templates or []):
+ if ix >= builtin_cpp_conversions[self.cname]:
+ break
+ if T.is_pyobject or not T.can_coerce_to_pyobject(env):
+ return False
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if self.to_py_function is not None:
+ return True
+ if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
+ X = "XYZABC"
+ tags = []
+ context = {}
+ for ix, T in enumerate(self.templates or []):
+ if ix >= builtin_cpp_conversions[self.cname]:
+ break
+ if not T.create_to_py_utility_code(env):
+ return False
+ tags.append(T.specialization_name())
+ context[X[ix]] = T
+
+ if self.cname in cpp_string_conversions:
+ cls = 'string'
+ prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode
+ tags = (type_identifier(self),)
+ elif self.cname.startswith('std::'):
+ cls = self.cname[5:]
+ prefix = ''
+ else:
+ cls = 'arcadia_' + self.cname
+ prefix = ''
+ cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags))
+ context.update({
+ 'cname': cname,
+ 'maybe_unordered': self.maybe_unordered(),
+ 'type': self.cname,
+ })
+ from .UtilityCode import CythonUtilityCode
+ env.use_utility_code(CythonUtilityCode.load(
+ cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx",
+ context=context, compiler_directives=env.directives))
+ self.to_py_function = cname
+ return True
+
+ def is_template_type(self):
+ return self.templates is not None and self.template_type is None
+
+ def get_fused_types(self, result=None, seen=None):
+ if result is None:
+ result = []
+ seen = set()
+ if self.namespace:
+ self.namespace.get_fused_types(result, seen)
+ if self.templates:
+ for T in self.templates:
+ T.get_fused_types(result, seen)
+ return result
+
+ def specialize_here(self, pos, template_values=None):
+ if not self.is_template_type():
+ error(pos, "'%s' type is not a template" % self)
+ return error_type
+ if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates):
+ num_defaults = len(self.templates) - len(template_values)
+ partial_specialization = self.declaration_code('', template_params=template_values)
+ # Most of the time we don't need to declare anything typed to these
+ # default template arguments, but when we do there's no way in C++
+ # to reference this directly. However, it is common convention to
+ # provide a typedef in the template class that resolves to each
+ # template type. For now, allow the user to specify this name as
+ # the template parameter.
+ # TODO: Allow typedefs in cpp classes and search for it in this
+ # classes scope as a concrete name we could use.
+ template_values = template_values + [
+ TemplatePlaceholderType(
+ "%s::%s" % (partial_specialization, param.name), True)
+ for param in self.templates[-num_defaults:]]
+ if len(self.templates) != len(template_values):
+ error(pos, "%s templated type receives %d arguments, got %d" %
+ (self.name, len(self.templates), len(template_values)))
+ return error_type
+ has_object_template_param = False
+ for value in template_values:
+ if value.is_pyobject:
+ has_object_template_param = True
+ error(pos,
+ "Python object type '%s' cannot be used as a template argument" % value)
+ if has_object_template_param:
+ return error_type
+ return self.specialize(dict(zip(self.templates, template_values)))
+
+ def specialize(self, values):
+ if not self.templates and not self.namespace:
+ return self
+ if self.templates is None:
+ self.templates = []
+ key = tuple(values.items())
+ if key in self.specializations:
+ return self.specializations[key]
+ template_values = [t.specialize(values) for t in self.templates]
+ specialized = self.specializations[key] = \
+ CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
+ # Need to do these *after* self.specializations[key] is set
+ # to avoid infinite recursion on circular references.
+ specialized.base_classes = [b.specialize(values) for b in self.base_classes]
+ if self.namespace is not None:
+ specialized.namespace = self.namespace.specialize(values)
+ specialized.scope = self.scope.specialize(values, specialized)
+ if self.cname == 'std::vector':
+ # vector<bool> is special cased in the C++ standard, and its
+ # accessors do not necessarily return references to the underlying
+ # elements (which may be bit-packed).
+ # http://www.cplusplus.com/reference/vector/vector-bool/
+ # Here we pretend that the various methods return bool values
+ # (as the actual returned values are coercible to such, and
+ # we don't support call expressions as lvalues).
+ T = values.get(self.templates[0], None)
+ if T and not T.is_fused and T.empty_declaration_code() == 'bool':
+ for bit_ref_returner in ('at', 'back', 'front'):
+ if bit_ref_returner in specialized.scope.entries:
+ specialized.scope.entries[bit_ref_returner].type.return_type = T
+ return specialized
+
+ def deduce_template_params(self, actual):
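+ # Illustrative: deducing the declared 'vector[T]' against an actual
+ # 'vector[int]' yields {T: int}; the bases of 'actual' are also searched,
+ # so a class derived from vector[int] matches as well.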
+ if actual.is_const:
+ actual = actual.const_base_type
+ if actual.is_reference:
+ actual = actual.ref_base_type
+ if self == actual:
+ return {}
+ elif actual.is_cpp_class:
+ self_template_type = self
+ while getattr(self_template_type, 'template_type', None):
+ self_template_type = self_template_type.template_type
+ def all_bases(cls):
+ yield cls
+ for parent in cls.base_classes:
+ for base in all_bases(parent):
+ yield base
+ for actual_base in all_bases(actual):
+ template_type = actual_base
+ while getattr(template_type, 'template_type', None):
+ template_type = template_type.template_type
+ if (self_template_type.empty_declaration_code()
+ == template_type.empty_declaration_code()):
+ return reduce(
+ merge_template_deductions,
+ [formal_param.deduce_template_params(actual_param)
+ for (formal_param, actual_param)
+ in zip(self.templates, actual_base.templates)],
+ {})
+ else:
+ return {}
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0,
+ template_params = None):
+ if template_params is None:
+ template_params = self.templates
+ if self.templates:
+ template_strings = [param.declaration_code('', for_display, None, pyrex)
+ for param in template_params
+ if not is_optional_template_param(param) and not param.is_fused]
+ if for_display:
+ brackets = "[%s]"
+ else:
+ brackets = "<%s> "
+ templates = brackets % ",".join(template_strings)
+ else:
+ templates = ""
+ if pyrex or for_display:
+ base_code = "%s%s" % (self.name, templates)
+ else:
+ base_code = "%s%s" % (self.cname, templates)
+ if self.namespace is not None:
+ base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code)
+ base_code = public_decl(base_code, dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def is_subclass(self, other_type):
+ if self.same_as_resolved_type(other_type):
+ return 1
+ for base_class in self.base_classes:
+ if base_class.is_subclass(other_type):
+ return 1
+ return 0
+
+ def subclass_dist(self, super_type):
+ if self.same_as_resolved_type(super_type):
+ return 0
+ elif not self.base_classes:
+ return float('inf')
+ else:
+ return 1 + min(b.subclass_dist(super_type) for b in self.base_classes)
+
+ def same_as_resolved_type(self, other_type):
+ if other_type.is_cpp_class:
+ if self == other_type:
+ return 1
+ # This messy logic is needed due to GH Issue #1852.
+ elif (self.cname == other_type.cname and
+ (self.template_type and other_type.template_type
+ or self.templates
+ or other_type.templates)):
+ if self.templates == other_type.templates:
+ return 1
+ for t1, t2 in zip(self.templates, other_type.templates):
+ if is_optional_template_param(t1) and is_optional_template_param(t2):
+ break
+ if not t1.same_as_resolved_type(t2):
+ return 0
+ return 1
+ return 0
+
+ def assignable_from_resolved_type(self, other_type):
+ # TODO: handle operator=(...) here?
+ if other_type is error_type:
+ return True
+ elif other_type.is_cpp_class:
+ return other_type.is_subclass(self)
+ elif other_type.is_string and self.cname in cpp_string_conversions:
+ return True
+
+ def attributes_known(self):
+ return self.scope is not None
+
+ def find_cpp_operation_type(self, operator, operand_type=None):
+ operands = [self]
+ if operand_type is not None:
+ operands.append(operand_type)
+ # pos == None => no errors
+ operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
+ if not operator_entry:
+ return None
+ func_type = operator_entry.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ return func_type.return_type
+
+ def get_constructor(self, pos):
+ constructor = self.scope.lookup('<init>')
+ if constructor is not None:
+ return constructor
+
+ # Otherwise: automatically declare no-args default constructor.
+ # Make it "nogil" if the base classes allow it.
+ nogil = True
+ for base in self.base_classes:
+ base_constructor = base.scope.lookup('<init>')
+ if base_constructor and not base_constructor.type.nogil:
+ nogil = False
+ break
+
+ func_type = CFuncType(self, [], exception_check='+', nogil=nogil)
+ return self.scope.declare_cfunction(u'<init>', func_type, pos)
+
+ def check_nullary_constructor(self, pos, msg="stack allocated"):
+ constructor = self.scope.lookup(u'<init>')
+ if constructor is not None and best_match([], constructor.all_alternatives()) is None:
+ error(pos, "C++ class must have a nullary constructor to be %s" % msg)
+
+
+class TemplatePlaceholderType(CType):
+
+ def __init__(self, name, optional=False):
+ self.name = name
+ self.optional = optional
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if entity_code:
+ return self.name + " " + entity_code
+ else:
+ return self.name
+
+ def specialize(self, values):
+ if self in values:
+ return values[self]
+ else:
+ return self
+
+ def deduce_template_params(self, actual):
+ return {self: actual}
+
+ def same_as_resolved_type(self, other_type):
+ if isinstance(other_type, TemplatePlaceholderType):
+ return self.name == other_type.name
+ else:
+ return 0
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __cmp__(self, other):
+ if isinstance(other, TemplatePlaceholderType):
+ return cmp(self.name, other.name)
+ else:
+ return cmp(type(self), type(other))
+
+ def __eq__(self, other):
+ if isinstance(other, TemplatePlaceholderType):
+ return self.name == other.name
+ else:
+ return False
+
+def is_optional_template_param(type):
+ return isinstance(type, TemplatePlaceholderType) and type.optional
+
+
+class CEnumType(CIntLike, CType):
+ # name string
+ # cname string or None
+ # typedef_flag boolean
+ # values [string], populated during declaration analysis
+
+ is_enum = 1
+ signed = 1
+ rank = -1 # Ranks below any integer type
+
+ def __init__(self, name, cname, typedef_flag, namespace=None):
+ self.name = name
+ self.cname = cname
+ self.values = []
+ self.typedef_flag = typedef_flag
+ self.namespace = namespace
+ self.default_value = "(%s) 0" % self.empty_declaration_code()
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<CEnumType %s %s%s>" % (self.name, self.cname,
+ ("", " typedef")[self.typedef_flag])
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ base_code = self.name
+ else:
+ if self.namespace:
+ base_code = "%s::%s" % (
+ self.namespace.empty_declaration_code(), self.cname)
+ elif self.typedef_flag:
+ base_code = self.cname
+ else:
+ base_code = "enum %s" % self.cname
+ base_code = public_decl(base_code, dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+ def specialize(self, values):
+ if self.namespace:
+ namespace = self.namespace.specialize(values)
+ if namespace != self.namespace:
+ return CEnumType(
+ self.name, self.cname, self.typedef_flag, namespace)
+ return self
+
+ def create_type_wrapper(self, env):
+ from .UtilityCode import CythonUtilityCode
+ env.use_utility_code(CythonUtilityCode.load(
+ "EnumType", "CpdefEnums.pyx",
+ context={"name": self.name,
+ "items": tuple(self.values)},
+ outer_module_scope=env.global_scope()))
+
+
+class CTupleType(CType):
+ # components [PyrexType]
+
+ is_ctuple = True
+
+ def __init__(self, cname, components):
+ self.cname = cname
+ self.components = components
+ self.size = len(components)
+ self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
+ self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
+ self.exception_check = True
+ self._convert_to_py_code = None
+ self._convert_from_py_code = None
+
+ def __str__(self):
+ return "(%s)" % ", ".join(str(c) for c in self.components)
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ if pyrex or for_display:
+ return str(self)
+ else:
+ return self.base_declaration_code(self.cname, entity_code)
+
+ def can_coerce_to_pyobject(self, env):
+ for component in self.components:
+ if not component.can_coerce_to_pyobject(env):
+ return False
+ return True
+
+ def can_coerce_from_pyobject(self, env):
+ for component in self.components:
+ if not component.can_coerce_from_pyobject(env):
+ return False
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if self._convert_to_py_code is False:
+ return None # tri-state-ish
+
+ if self._convert_to_py_code is None:
+ for component in self.components:
+ if not component.create_to_py_utility_code(env):
+ self.to_py_function = None
+ self._convert_to_py_code = False
+ return False
+
+ context = dict(
+ struct_type_decl=self.empty_declaration_code(),
+ components=self.components,
+ funcname=self.to_py_function,
+ size=len(self.components)
+ )
+ self._convert_to_py_code = TempitaUtilityCode.load(
+ "ToPyCTupleUtility", "TypeConversion.c", context=context)
+
+ env.use_utility_code(self._convert_to_py_code)
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if self._convert_from_py_code is False:
+ return None # tri-state-ish
+
+ if self._convert_from_py_code is None:
+ for component in self.components:
+ if not component.create_from_py_utility_code(env):
+ self.from_py_function = None
+ self._convert_from_py_code = False
+ return False
+
+ context = dict(
+ struct_type_decl=self.empty_declaration_code(),
+ components=self.components,
+ funcname=self.from_py_function,
+ size=len(self.components)
+ )
+ self._convert_from_py_code = TempitaUtilityCode.load(
+ "FromPyCTupleUtility", "TypeConversion.c", context=context)
+
+ env.use_utility_code(self._convert_from_py_code)
+ return True
+
+ def cast_code(self, expr_code):
+ return expr_code
+
+
+def c_tuple_type(components):
+ components = tuple(components)
+ cname = Naming.ctuple_type_prefix + type_list_identifier(components)
+ tuple_type = CTupleType(cname, components)
+ return tuple_type
+
+
+class UnspecifiedType(PyrexType):
+ # Used as a placeholder until the type can be determined.
+
+ is_unspecified = 1
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ return "<unspecified>"
+
+ def same_as_resolved_type(self, other_type):
+ return False
+
+
+class ErrorType(PyrexType):
+ # Used to prevent propagation of error messages.
+
+ is_error = 1
+ exception_value = "0"
+ exception_check = 0
+ to_py_function = "dummy"
+ from_py_function = "dummy"
+
+ def create_to_py_utility_code(self, env):
+ return True
+
+ def create_from_py_utility_code(self, env):
+ return True
+
+ def declaration_code(self, entity_code,
+ for_display = 0, dll_linkage = None, pyrex = 0):
+ return "<error>"
+
+ def same_as_resolved_type(self, other_type):
+ return 1
+
+ def error_condition(self, result_code):
+ return "dummy"
+
+
+rank_to_type_name = (
+ "char", # 0
+ "short", # 1
+ "int", # 2
+ "long", # 3
+ "PY_LONG_LONG", # 4
+ "float", # 5
+ "double", # 6
+ "long double", # 7
+)
+
+_rank_to_type_name = list(rank_to_type_name)
+RANK_INT = _rank_to_type_name.index('int')
+RANK_LONG = _rank_to_type_name.index('long')
+RANK_FLOAT = _rank_to_type_name.index('float')
+UNSIGNED = 0
+SIGNED = 2
+
+error_type = ErrorType()
+unspecified_type = UnspecifiedType()
+
+py_object_type = PyObjectType()
+
+c_void_type = CVoidType()
+
+c_uchar_type = CIntType(0, UNSIGNED)
+c_ushort_type = CIntType(1, UNSIGNED)
+c_uint_type = CIntType(2, UNSIGNED)
+c_ulong_type = CIntType(3, UNSIGNED)
+c_ulonglong_type = CIntType(4, UNSIGNED)
+
+c_char_type = CIntType(0)
+c_short_type = CIntType(1)
+c_int_type = CIntType(2)
+c_long_type = CIntType(3)
+c_longlong_type = CIntType(4)
+
+c_schar_type = CIntType(0, SIGNED)
+c_sshort_type = CIntType(1, SIGNED)
+c_sint_type = CIntType(2, SIGNED)
+c_slong_type = CIntType(3, SIGNED)
+c_slonglong_type = CIntType(4, SIGNED)
+
+c_float_type = CFloatType(5, math_h_modifier='f')
+c_double_type = CFloatType(6)
+c_longdouble_type = CFloatType(7, math_h_modifier='l')
+
+c_float_complex_type = CComplexType(c_float_type)
+c_double_complex_type = CComplexType(c_double_type)
+c_longdouble_complex_type = CComplexType(c_longdouble_type)
+
+c_anon_enum_type = CAnonEnumType(-1)
+c_returncode_type = CReturnCodeType(RANK_INT)
+c_bint_type = CBIntType(RANK_INT)
+c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
+c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
+c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
+c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
+c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
+c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
+c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)
+
+c_null_ptr_type = CNullPtrType(c_void_type)
+c_void_ptr_type = CPtrType(c_void_type)
+c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
+c_char_ptr_type = CPtrType(c_char_type)
+c_const_char_ptr_type = CPtrType(CConstType(c_char_type))
+c_uchar_ptr_type = CPtrType(c_uchar_type)
+c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type))
+c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
+c_int_ptr_type = CPtrType(c_int_type)
+c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
+c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type))
+c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
+c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
+c_size_t_ptr_type = CPtrType(c_size_t_type)
+
+# GIL state
+c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
+c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
+c_threadstate_ptr_type = CPtrType(c_threadstate_type)
+
+# PEP-539 "Py_tss_t" type
+c_pytss_t_type = CPyTSSTType()
+
+# the Py_buffer type is defined in Builtin.py
+c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
+c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
+
+ # Not sure whether the unsigned versions and 'long long' should be in there:
+ # 'long long' requires C99 and might be slow, and would always get preferred
+ # when specialization happens through calling rather than indexing.
+cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
+ name="integral")
+# Omitting long double as it might be slow
+cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
+cy_numeric_type = FusedType([c_short_type,
+ c_int_type,
+ c_long_type,
+ c_float_type,
+ c_double_type,
+ c_float_complex_type,
+ c_double_complex_type], name="numeric")
+
+# buffer-related structs
+c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
+ None, 1, "__Pyx_Buf_DimInfo")
+c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
+c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
+c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
+ None, 1, "__Pyx_LocalBuf_ND")
+
+cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
+ None, 0, "__pyx_memoryview_obj")
+
+memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
+ None, 1, "__Pyx_memviewslice")
+
+modifiers_and_name_to_type = {
+ #(signed, longness, name) : type
+ (0, 0, "char"): c_uchar_type,
+ (1, 0, "char"): c_char_type,
+ (2, 0, "char"): c_schar_type,
+
+ (0, -1, "int"): c_ushort_type,
+ (0, 0, "int"): c_uint_type,
+ (0, 1, "int"): c_ulong_type,
+ (0, 2, "int"): c_ulonglong_type,
+
+ (1, -1, "int"): c_short_type,
+ (1, 0, "int"): c_int_type,
+ (1, 1, "int"): c_long_type,
+ (1, 2, "int"): c_longlong_type,
+
+ (2, -1, "int"): c_sshort_type,
+ (2, 0, "int"): c_sint_type,
+ (2, 1, "int"): c_slong_type,
+ (2, 2, "int"): c_slonglong_type,
+
+ (1, 0, "float"): c_float_type,
+ (1, 0, "double"): c_double_type,
+ (1, 1, "double"): c_longdouble_type,
+
+ (1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins
+ (1, 0, "floatcomplex"): c_float_complex_type,
+ (1, 0, "doublecomplex"): c_double_complex_type,
+ (1, 1, "doublecomplex"): c_longdouble_complex_type,
+
+ #
+ (1, 0, "void"): c_void_type,
+ (1, 0, "Py_tss_t"): c_pytss_t_type,
+
+ (1, 0, "bint"): c_bint_type,
+ (0, 0, "Py_UNICODE"): c_py_unicode_type,
+ (0, 0, "Py_UCS4"): c_py_ucs4_type,
+ (2, 0, "Py_hash_t"): c_py_hash_t_type,
+ (2, 0, "Py_ssize_t"): c_py_ssize_t_type,
+ (2, 0, "ssize_t") : c_ssize_t_type,
+ (0, 0, "size_t") : c_size_t_type,
+ (2, 0, "ptrdiff_t") : c_ptrdiff_t_type,
+
+ (1, 0, "object"): py_object_type,
+}
+
+def is_promotion(src_type, dst_type):
+ # It's hard to find a precise definition of promotion, but empirical
+ # evidence suggests that the below is all that's allowed.
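+ # e.g. is_promotion(c_short_type, c_int_type) -> True (integral promotion),
+ # is_promotion(c_float_type, c_double_type) -> True, while
+ # is_promotion(c_long_type, c_int_type) -> False (it would narrow).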
+ if src_type.is_numeric:
+ if dst_type.same_as(c_int_type):
+ unsigned = (not src_type.signed)
+ return (src_type.is_enum or
+ (src_type.is_int and
+ unsigned + src_type.rank < dst_type.rank))
+ elif dst_type.same_as(c_double_type):
+ return src_type.is_float and src_type.rank <= dst_type.rank
+ return False
+
+def best_match(arg_types, functions, pos=None, env=None, args=None):
+ """
+ Given a list of argument types and a list of functions, choose one
+ function to call that seems to be the "best" fit for these arguments.
+ This function is used, e.g., when deciding which overloaded method
+ to dispatch for C++ classes.
+
+ We first eliminate functions based on arity, and if only one
+ function has the correct arity, we return it. Otherwise, we weight
+ functions based on how much work must be done to convert the
+ arguments, with the following priorities:
+ * identical types or pointers to identical types
+ * promotions
+ * non-Python types
+ That is, we prefer functions where no arguments need to be converted,
+ and failing that, functions where only promotions are required, and
+ so on.
+
+ If no function is deemed a good fit, or if two or more functions have
+ the same weight, we return None (as there is no best match). If pos
+ is not None, we also generate an error.
+ """
+ # TODO: args should be a list of types, not a list of Nodes.
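+ # Scoring sketch (illustrative): calling with a C short where the
+ # overloads are f(int) and f(float), f(int) scores as a promotion
+ # (score[2] += 1) while f(float) counts as a plain non-Python conversion
+ # (score[1] += 1); the lexicographic sort below then prefers f(int).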
+ actual_nargs = len(arg_types)
+
+ candidates = []
+ errors = []
+ for func in functions:
+ error_mesg = ""
+ func_type = func.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ # Check function type
+ if not func_type.is_cfunction:
+ if not func_type.is_error and pos is not None:
+ error_mesg = "Calling non-function type '%s'" % func_type
+ errors.append((func, error_mesg))
+ continue
+ # Check no. of args
+ max_nargs = len(func_type.args)
+ min_nargs = max_nargs - func_type.optional_arg_count
+ if actual_nargs < min_nargs or \
+ (not func_type.has_varargs and actual_nargs > max_nargs):
+ if max_nargs == min_nargs and not func_type.has_varargs:
+ expectation = max_nargs
+ elif actual_nargs < min_nargs:
+ expectation = "at least %s" % min_nargs
+ else:
+ expectation = "at most %s" % max_nargs
+ error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \
+ % (expectation, actual_nargs)
+ errors.append((func, error_mesg))
+ continue
+ if func_type.templates:
+ deductions = reduce(
+ merge_template_deductions,
+ [pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
+ {})
+ if deductions is None:
+ errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (func_type, ', '.join(map(str, arg_types)))))
+ elif len(deductions) < len(func_type.templates):
+ errors.append((func, "Unable to deduce type parameter %s" % (
+ ", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
+ else:
+ type_list = [deductions[param] for param in func_type.templates]
+ from .Symtab import Entry
+ specialization = Entry(
+ name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
+ cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]),
+ type = func_type.specialize(deductions),
+ pos = func.pos)
+ candidates.append((specialization, specialization.type))
+ else:
+ candidates.append((func, func_type))
+
+ # Optimize the most common case of no overloading...
+ if len(candidates) == 1:
+ return candidates[0][0]
+ elif len(candidates) == 0:
+ if pos is not None:
+ func, errmsg = errors[0]
+ if len(errors) == 1 or all(e == errmsg for _, e in errors):
+ error(pos, errmsg)
+ else:
+ error(pos, "no suitable method found")
+ return None
+
+ possibilities = []
+ bad_types = []
+ needed_coercions = {}
+
+ for index, (func, func_type) in enumerate(candidates):
+ score = [0,0,0,0,0,0,0]
+ for i in range(min(actual_nargs, len(func_type.args))):
+ src_type = arg_types[i]
+ dst_type = func_type.args[i].type
+
+ assignable = dst_type.assignable_from(src_type)
+
+ # Now take care of unprefixed string literals: when you call a cdef
+ # function that takes a char *, the coercion means that the argument
+ # type simply becomes bytes. We need to do this coercion
+ # manually for overloaded and fused functions.
+ if not assignable:
+ c_src_type = None
+ if src_type.is_pyobject:
+ if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
+ c_src_type = dst_type.resolve()
+ else:
+ c_src_type = src_type.default_coerced_ctype()
+ elif src_type.is_pythran_expr:
+ c_src_type = src_type.org_buffer
+
+ if c_src_type is not None:
+ assignable = dst_type.assignable_from(c_src_type)
+ if assignable:
+ src_type = c_src_type
+ needed_coercions[func] = (i, dst_type)
+
+ if assignable:
+ if src_type == dst_type or dst_type.same_as(src_type):
+ pass # score 0
+ elif func_type.is_strict_signature:
+ break # exact match requested but not found
+ elif is_promotion(src_type, dst_type):
+ score[2] += 1
+ elif ((src_type.is_int and dst_type.is_int) or
+ (src_type.is_float and dst_type.is_float)):
+ score[2] += abs(dst_type.rank + (not dst_type.signed) -
+ (src_type.rank + (not src_type.signed))) + 1
+ elif dst_type.is_ptr and src_type.is_ptr:
+ if dst_type.base_type == c_void_type:
+ score[4] += 1
+ elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type):
+ score[6] += src_type.base_type.subclass_dist(dst_type.base_type)
+ else:
+ score[5] += 1
+ elif not src_type.is_pyobject:
+ score[1] += 1
+ else:
+ score[0] += 1
+ else:
+ error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type)
+ bad_types.append((func, error_mesg))
+ break
+ else:
+ possibilities.append((score, index, func)) # so we can sort it
+
+ if possibilities:
+ possibilities.sort()
+ if len(possibilities) > 1:
+ score1 = possibilities[0][0]
+ score2 = possibilities[1][0]
+ if score1 == score2:
+ if pos is not None:
+ error(pos, "ambiguous overloaded method")
+ return None
+
+ function = possibilities[0][-1]
+
+ if function in needed_coercions and env:
+ arg_i, coerce_to_type = needed_coercions[function]
+ args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env)
+
+ return function
+
+ if pos is not None:
+ if len(bad_types) == 1:
+ error(pos, bad_types[0][1])
+ else:
+ error(pos, "no suitable method found")
+
+ return None
+
+def merge_template_deductions(a, b):
+    # Merge two deduction dicts, failing (None) on conflicting deductions.
+    if a is None or b is None:
+        return None
+    merged = a
+    for param, value in b.items():
+        if param in merged:
+            if merged[param] != value:
+                return None
+        else:
+            merged[param] = value
+    return merged
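+# Illustrative behaviour (a sketch, not part of the original source; T and
+# U stand for template parameter objects):
+#   merge_template_deductions({T: c_int_type}, {U: c_long_type})
+#       -> {T: c_int_type, U: c_long_type}
+#   merge_template_deductions({T: c_int_type}, {T: c_long_type})
+#       -> None   (conflicting deduction for T)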
+
+
+def widest_numeric_type(type1, type2):
+ """Given two numeric types, return the narrowest type encompassing both of them.
+ """
+ if type1.is_reference:
+ type1 = type1.ref_base_type
+ if type2.is_reference:
+ type2 = type2.ref_base_type
+ if type1.is_const:
+ type1 = type1.const_base_type
+ if type2.is_const:
+ type2 = type2.const_base_type
+ if type1 == type2:
+ widest_type = type1
+ elif type1.is_complex or type2.is_complex:
+ def real_type(ntype):
+ if ntype.is_complex:
+ return ntype.real_type
+ return ntype
+ widest_type = CComplexType(
+ widest_numeric_type(
+ real_type(type1),
+ real_type(type2)))
+ elif type1.is_enum and type2.is_enum:
+ widest_type = c_int_type
+ elif type1.rank < type2.rank:
+ widest_type = type2
+ elif type1.rank > type2.rank:
+ widest_type = type1
+ elif type1.signed < type2.signed:
+ widest_type = type1
+ elif type1.signed > type2.signed:
+ widest_type = type2
+ elif type1.is_typedef > type2.is_typedef:
+ widest_type = type1
+ else:
+ widest_type = type2
+ return widest_type
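+# Illustrative behaviour (a sketch, assuming the standard C type objects
+# defined in this module):
+#   widest_numeric_type(c_int_type, c_long_type)   -> c_long_type  (higher rank)
+#   widest_numeric_type(c_float_type, c_int_type)  -> c_float_type (floats rank above ints)
+#   mixing a complex type with a real one yields a complex type built on
+#   the wider of the two real types.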
+
+
+def numeric_type_fits(small_type, large_type):
+ return widest_numeric_type(small_type, large_type) == large_type
+
+
+def independent_spanning_type(type1, type2):
+ # Return a type assignable independently from both type1 and
+ # type2, but do not require any interoperability between the two.
+ # For example, in "True * 2", it is safe to assume an integer
+ # result type (so spanning_type() will do the right thing),
+ # whereas "x = True or 2" must evaluate to a type that can hold
+ # both a boolean value and an integer, so this function works
+ # better.
+ if type1.is_reference ^ type2.is_reference:
+ if type1.is_reference:
+ type1 = type1.ref_base_type
+ else:
+ type2 = type2.ref_base_type
+ if type1 == type2:
+ return type1
+ elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric):
+ # special case: if one of the results is a bint and the other
+ # is another C integer, we must prevent returning a numeric
+ # type so that we do not lose the ability to coerce to a
+ # Python bool if we have to.
+ return py_object_type
+ span_type = _spanning_type(type1, type2)
+ if span_type is None:
+ return error_type
+ return span_type
+
+def spanning_type(type1, type2):
+ # Return a type assignable from both type1 and type2, or
+ # py_object_type if no better type is found. Assumes that the
+ # code that calls this will try a coercion afterwards, which will
+ # fail if the types cannot actually coerce to a py_object_type.
+ if type1 == type2:
+ return type1
+ elif type1 is py_object_type or type2 is py_object_type:
+ return py_object_type
+ elif type1 is c_py_unicode_type or type2 is c_py_unicode_type:
+ # Py_UNICODE behaves more like a string than an int
+ return py_object_type
+ span_type = _spanning_type(type1, type2)
+ if span_type is None:
+ return py_object_type
+ return span_type
+
+def _spanning_type(type1, type2):
+ if type1.is_numeric and type2.is_numeric:
+ return widest_numeric_type(type1, type2)
+ elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric:
+ return widest_numeric_type(c_double_type, type2)
+ elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric:
+ return widest_numeric_type(type1, c_double_type)
+ elif type1.is_extension_type and type2.is_extension_type:
+ return widest_extension_type(type1, type2)
+ elif type1.is_pyobject or type2.is_pyobject:
+ return py_object_type
+ elif type1.assignable_from(type2):
+ if type1.is_extension_type and type1.typeobj_is_imported():
+ # external types are unsafe, so we use PyObject instead
+ return py_object_type
+ return type1
+ elif type2.assignable_from(type1):
+ if type2.is_extension_type and type2.typeobj_is_imported():
+ # external types are unsafe, so we use PyObject instead
+ return py_object_type
+ return type2
+ elif type1.is_ptr and type2.is_ptr:
+ if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class:
+ common_base = widest_cpp_type(type1.base_type, type2.base_type)
+ if common_base:
+ return CPtrType(common_base)
+ # incompatible pointers, void* will do as a result
+ return c_void_ptr_type
+ else:
+ return None
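+# Illustrative behaviour (a sketch):
+#   _spanning_type(c_int_type, c_double_type)   -> c_double_type (numeric widening)
+#   _spanning_type(py_object_type, c_int_type)  -> py_object_type
+#   two unrelated pointer types                 -> c_void_ptr_type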
+
+def widest_extension_type(type1, type2):
+ if type1.typeobj_is_imported() or type2.typeobj_is_imported():
+ return py_object_type
+ while True:
+ if type1.subtype_of(type2):
+ return type2
+ elif type2.subtype_of(type1):
+ return type1
+ type1, type2 = type1.base_type, type2.base_type
+ if type1 is None or type2 is None:
+ return py_object_type
+
+def widest_cpp_type(type1, type2):
+ @cached_function
+ def bases(type):
+ all = set()
+ for base in type.base_classes:
+ all.add(base)
+ all.update(bases(base))
+ return all
+ common_bases = bases(type1).intersection(bases(type2))
+ common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set())
+ candidates = [b for b in common_bases if b not in common_bases_bases]
+ if len(candidates) == 1:
+ return candidates[0]
+ else:
+ # Fall back to void* for now.
+ return None
+
+
+def simple_c_type(signed, longness, name):
+ # Find type descriptor for simple type given name and modifiers.
+ # Returns None if arguments don't make sense.
+ return modifiers_and_name_to_type.get((signed, longness, name))
+
+def parse_basic_type(name):
+ base = None
+ if name.startswith('p_'):
+ base = parse_basic_type(name[2:])
+ elif name.startswith('p'):
+ base = parse_basic_type(name[1:])
+ elif name.endswith('*'):
+ base = parse_basic_type(name[:-1])
+ if base:
+ return CPtrType(base)
+ #
+ basic_type = simple_c_type(1, 0, name)
+ if basic_type:
+ return basic_type
+ #
+ signed = 1
+ longness = 0
+ if name == 'Py_UNICODE':
+ signed = 0
+ elif name == 'Py_UCS4':
+ signed = 0
+ elif name == 'Py_hash_t':
+ signed = 2
+ elif name == 'Py_ssize_t':
+ signed = 2
+ elif name == 'ssize_t':
+ signed = 2
+ elif name == 'size_t':
+ signed = 0
+ else:
+ if name.startswith('u'):
+ name = name[1:]
+ signed = 0
+ elif (name.startswith('s') and
+ not name.startswith('short')):
+ name = name[1:]
+ signed = 2
+ longness = 0
+ while name.startswith('short'):
+ name = name.replace('short', '', 1).strip()
+ longness -= 1
+ while name.startswith('long'):
+ name = name.replace('long', '', 1).strip()
+ longness += 1
+ if longness != 0 and not name:
+ name = 'int'
+ return simple_c_type(signed, longness, name)
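+# Illustrative usage (a sketch; the results come from the
+# modifiers_and_name_to_type table defined elsewhere in this module):
+#   parse_basic_type("int")       -> plain C int
+#   parse_basic_type("uint")      -> unsigned int   ('u' prefix sets signed=0)
+#   parse_basic_type("longlong")  -> long long      (longness accumulates to 2)
+#   parse_basic_type("p_int")     -> CPtrType(int)  ('p_' pointer prefix)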
+
+def c_array_type(base_type, size):
+ # Construct a C array type.
+ if base_type is error_type:
+ return error_type
+ else:
+ return CArrayType(base_type, size)
+
+def c_ptr_type(base_type):
+ # Construct a C pointer type.
+ if base_type is error_type:
+ return error_type
+ elif base_type.is_reference:
+ return CPtrType(base_type.ref_base_type)
+ else:
+ return CPtrType(base_type)
+
+def c_ref_type(base_type):
+ # Construct a C reference type
+ if base_type is error_type:
+ return error_type
+ else:
+ return CReferenceType(base_type)
+
+def c_const_type(base_type):
+ # Construct a C const type.
+ if base_type is error_type:
+ return error_type
+ else:
+ return CConstType(base_type)
+
+def same_type(type1, type2):
+ return type1.same_as(type2)
+
+def assignable_from(type1, type2):
+ return type1.assignable_from(type2)
+
+def typecast(to_type, from_type, expr_code):
+ # Return expr_code cast to a C type which can be
+ # assigned to to_type, assuming its existing C type
+ # is from_type.
+ if (to_type is from_type or
+ (not to_type.is_pyobject and assignable_from(to_type, from_type))):
+ return expr_code
+ elif (to_type is py_object_type and from_type and
+ from_type.is_builtin_type and from_type.name != 'type'):
+ # no cast needed, builtins are PyObject* already
+ return expr_code
+ else:
+ #print "typecast: to", to_type, "from", from_type ###
+ return to_type.cast_code(expr_code)
+
+def type_list_identifier(types):
+ return cap_length('__and_'.join(type_identifier(type) for type in types))
+
+_type_identifier_cache = {}
+def type_identifier(type):
+ decl = type.empty_declaration_code()
+ safe = _type_identifier_cache.get(decl)
+ if safe is None:
+ safe = decl
+ safe = re.sub(' +', ' ', safe)
+ safe = re.sub(' ([^a-zA-Z0-9_])', r'\1', safe)
+ safe = re.sub('([^a-zA-Z0-9_]) ', r'\1', safe)
+ safe = (safe.replace('__', '__dunder')
+ .replace('const ', '__const_')
+ .replace(' ', '__space_')
+ .replace('*', '__ptr')
+ .replace('&', '__ref')
+ .replace('[', '__lArr')
+ .replace(']', '__rArr')
+ .replace('<', '__lAng')
+ .replace('>', '__rAng')
+ .replace('(', '__lParen')
+ .replace(')', '__rParen')
+ .replace(',', '__comma_')
+ .replace('::', '__in_'))
+ safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe))
+ _type_identifier_cache[decl] = safe
+ return safe
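+# Illustrative mapping (a sketch, assuming empty_declaration_code() yields
+# "std::vector<int> *" for a pointer to a vector of ints):
+#   "std::vector<int> *"  ->  "std__in_vector__lAngint__rAng__ptr"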
+
+def cap_length(s, max_prefix=63, max_len=1024):
+ if len(s) <= max_prefix:
+ return s
+ hash_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6]
+ return '%s__%s__etc' % (hash_prefix, s[:max_len-17])
diff --git a/contrib/tools/cython/Cython/Compiler/Pythran.py b/contrib/tools/cython/Cython/Compiler/Pythran.py
new file mode 100644
index 0000000000..c02704a918
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Pythran.py
@@ -0,0 +1,227 @@
+# cython: language_level=3
+
+from __future__ import absolute_import
+
+from .PyrexTypes import CType, CTypedefType, CStructOrUnionType
+
+import cython
+
+try:
+ import pythran
+ pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9)
+ pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6)
+except ImportError:
+ pythran = None
+ pythran_is_pre_0_9 = True
+ pythran_is_pre_0_9_6 = True
+
+if pythran_is_pre_0_9_6:
+ pythran_builtins = '__builtin__'
+else:
+ pythran_builtins = 'builtins'
+
+
+# Pythran/Numpy specific operations
+
+def has_np_pythran(env):
+ if env is None:
+ return False
+ directives = getattr(env, 'directives', None)
+ return (directives and directives.get('np_pythran', False))
+
+@cython.ccall
+def is_pythran_supported_dtype(type_):
+ if isinstance(type_, CTypedefType):
+ return is_pythran_supported_type(type_.typedef_base_type)
+ return type_.is_numeric
+
+
+def pythran_type(Ty, ptype="ndarray"):
+ if Ty.is_buffer:
+        ndim, dtype = Ty.ndim, Ty.dtype
+ if isinstance(dtype, CStructOrUnionType):
+ ctype = dtype.cname
+ elif isinstance(dtype, CType):
+ ctype = dtype.sign_and_name()
+ elif isinstance(dtype, CTypedefType):
+ ctype = dtype.typedef_cname
+ else:
+ raise ValueError("unsupported type %s!" % dtype)
+ if pythran_is_pre_0_9:
+ return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim)
+ else:
+ return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim))
+ if Ty.is_pythran_expr:
+ return Ty.pythran_type
+ #if Ty.is_none:
+ # return "decltype(pythonic::builtins::None)"
+ if Ty.is_numeric:
+ return Ty.sign_and_name()
+ raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty)))
+
+
+@cython.cfunc
+def type_remove_ref(ty):
+ return "typename std::remove_reference<%s>::type" % ty
+
+
+def pythran_binop_type(op, tA, tB):
+ if op == '**':
+ return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % (
+ pythran_type(tA), pythran_type(tB))
+ else:
+ return "decltype(std::declval<%s>() %s std::declval<%s>())" % (
+ pythran_type(tA), op, pythran_type(tB))
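+# Illustrative result (schematic; A and B stand for the pythran type strings
+# of the operands):
+#   pythran_binop_type('+', tA, tB)
+#       -> "decltype(std::declval<A>() + std::declval<B>())"
+# '**' is special-cased above because C++ has no power operator.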
+
+
+def pythran_unaryop_type(op, type_):
+ return "decltype(%sstd::declval<%s>())" % (
+ op, pythran_type(type_))
+
+
+@cython.cfunc
+def _index_access(index_code, indices):
+ indexing = ",".join([index_code(idx) for idx in indices])
+ return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
+
+
+def _index_type_code(index_with_type):
+ idx, index_type = index_with_type
+ if idx.is_slice:
+ n = 2 + int(not idx.step.is_none)
+ return "pythonic::%s::functor::slice{}(%s)" % (
+ pythran_builtins,
+ ",".join(["0"]*n))
+ elif index_type.is_int:
+ return "std::declval<%s>()" % index_type.sign_and_name()
+ elif index_type.is_pythran_expr:
+ return "std::declval<%s>()" % index_type.pythran_type
+ raise ValueError("unsupported indexing type %s!" % index_type)
+
+
+def _index_code(idx):
+ if idx.is_slice:
+ values = idx.start, idx.stop, idx.step
+ if idx.step.is_none:
+ func = "contiguous_slice"
+ values = values[:2]
+ else:
+ func = "slice"
+ return "pythonic::types::%s(%s)" % (
+ func, ",".join((v.pythran_result() for v in values)))
+ elif idx.type.is_int:
+ return to_pythran(idx)
+ elif idx.type.is_pythran_expr:
+ return idx.pythran_result()
+ raise ValueError("unsupported indexing type %s" % idx.type)
+
+
+def pythran_indexing_type(type_, indices):
+ return type_remove_ref("decltype(std::declval<%s>()%s)" % (
+ pythran_type(type_),
+ _index_access(_index_type_code, indices),
+ ))
+
+
+def pythran_indexing_code(indices):
+ return _index_access(_index_code, indices)
+
+def np_func_to_list(func):
+ if not func.is_numpy_attribute:
+ return []
+ return np_func_to_list(func.obj) + [func.attribute]
+
+if pythran is None:
+ def pythran_is_numpy_func_supported(name):
+ return False
+else:
+ def pythran_is_numpy_func_supported(func):
+ CurF = pythran.tables.MODULES['numpy']
+ FL = np_func_to_list(func)
+ for F in FL:
+ CurF = CurF.get(F, None)
+ if CurF is None:
+ return False
+ return True
+
+def pythran_functor(func):
+ func = np_func_to_list(func)
+ submodules = "::".join(func[:-1] + ["functor"])
+ return "pythonic::numpy::%s::%s" % (submodules, func[-1])
+
+def pythran_func_type(func, args):
+ args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
+ return "decltype(%s{}(%s))" % (pythran_functor(func), args)
+
+
+@cython.ccall
+def to_pythran(op, ptype=None):
+ op_type = op.type
+ if op_type.is_int:
+ # Make sure that integer literals always have exactly the type that the templates expect.
+ return op_type.cast_code(op.result())
+ if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
+ return op.result()
+ if op.is_none:
+ return "pythonic::%s::None" % pythran_builtins
+ if ptype is None:
+ ptype = pythran_type(op_type)
+
+ assert op.type.is_pyobject
+ return "from_python<%s>(%s)" % (ptype, op.py_result())
+
+
+@cython.cfunc
+def is_type(type_, types):
+ for attr in types:
+ if getattr(type_, attr, False):
+ return True
+ return False
+
+
+def is_pythran_supported_node_or_none(node):
+ return node.is_none or is_pythran_supported_type(node.type)
+
+
+@cython.ccall
+def is_pythran_supported_type(type_):
+ pythran_supported = (
+ "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
+ return is_type(type_, pythran_supported) or is_pythran_expr(type_)
+
+
+def is_pythran_supported_operation_type(type_):
+ pythran_supported = (
+ "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
+    return is_type(type_, pythran_supported) or is_pythran_expr(type_)
+
+
+@cython.ccall
+def is_pythran_expr(type_):
+ return type_.is_pythran_expr
+
+
+def is_pythran_buffer(type_):
+ return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and
+ type_.mode in ("c", "strided") and not type_.cast)
+
+def pythran_get_func_include_file(func):
+ func = np_func_to_list(func)
+ return "pythonic/numpy/%s.hpp" % "/".join(func)
+
+def include_pythran_generic(env):
+ # Generic files
+ env.add_include_file("pythonic/core.hpp")
+ env.add_include_file("pythonic/python/core.hpp")
+ env.add_include_file("pythonic/types/bool.hpp")
+ env.add_include_file("pythonic/types/ndarray.hpp")
+ env.add_include_file("pythonic/numpy/power.hpp")
+ env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins)
+ env.add_include_file("<new>") # for placement new
+
+ for i in (8, 16, 32, 64):
+ env.add_include_file("pythonic/types/uint%d.hpp" % i)
+ env.add_include_file("pythonic/types/int%d.hpp" % i)
+ for t in ("float", "float32", "float64", "set", "slice", "tuple", "int",
+ "complex", "complex64", "complex128"):
+ env.add_include_file("pythonic/types/%s.hpp" % t)
diff --git a/contrib/tools/cython/Cython/Compiler/Scanning.pxd b/contrib/tools/cython/Cython/Compiler/Scanning.pxd
new file mode 100644
index 0000000000..59593f88a2
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Scanning.pxd
@@ -0,0 +1,67 @@
+from __future__ import absolute_import
+
+import cython
+
+from ..Plex.Scanners cimport Scanner
+
+cdef unicode any_string_prefix, IDENT
+
+cdef get_lexicon()
+cdef initial_compile_time_env()
+
+cdef class Method:
+ cdef object name
+ cdef dict kwargs
+ cdef readonly object __name__ # for tracing the scanner
+
+## methods commented out with '##' are used by Parsing.py when compiled.
+
+@cython.final
+cdef class CompileTimeScope:
+ cdef public dict entries
+ cdef public CompileTimeScope outer
+ ##cdef declare(self, name, value)
+ ##cdef lookup_here(self, name)
+ ##cpdef lookup(self, name)
+
+@cython.final
+cdef class PyrexScanner(Scanner):
+ cdef public context
+ cdef public list included_files
+ cdef public CompileTimeScope compile_time_env
+ cdef public bint compile_time_eval
+ cdef public bint compile_time_expr
+ cdef public bint parse_comments
+ cdef public bint in_python_file
+ cdef public source_encoding
+ cdef set keywords
+ cdef public list indentation_stack
+ cdef public indentation_char
+ cdef public int bracket_nesting_level
+ cdef readonly bint async_enabled
+ cdef public sy
+ cdef public systring
+
+ cdef long current_level(self)
+ #cpdef commentline(self, text)
+ #cpdef open_bracket_action(self, text)
+ #cpdef close_bracket_action(self, text)
+ #cpdef newline_action(self, text)
+ #cpdef begin_string_action(self, text)
+ #cpdef end_string_action(self, text)
+ #cpdef unclosed_string_action(self, text)
+ @cython.locals(current_level=cython.long, new_level=cython.long)
+ cpdef indentation_action(self, text)
+ #cpdef eof_action(self, text)
+ ##cdef next(self)
+ ##cdef peek(self)
+ #cpdef put_back(self, sy, systring)
+ #cdef unread(self, token, value)
+ ##cdef bint expect(self, what, message = *) except -2
+ ##cdef expect_keyword(self, what, message = *)
+ ##cdef expected(self, what, message = *)
+ ##cdef expect_indent(self)
+ ##cdef expect_dedent(self)
+ ##cdef expect_newline(self, message=*, bint ignore_semicolon=*)
+ ##cdef int enter_async(self) except -1
+ ##cdef int exit_async(self) except -1
diff --git a/contrib/tools/cython/Cython/Compiler/Scanning.py b/contrib/tools/cython/Cython/Compiler/Scanning.py
new file mode 100644
index 0000000000..c721bba69b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Scanning.py
@@ -0,0 +1,553 @@
+# cython: infer_types=True, language_level=3, py2_import=True, auto_pickle=False
+#
+# Cython Scanner
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(make_lexicon=object, lexicon=object,
+ print_function=object, error=object, warning=object,
+ os=object, platform=object)
+
+import os
+import platform
+
+from .. import Utils
+from ..Plex.Scanners import Scanner
+from ..Plex.Errors import UnrecognizedInput
+from .Errors import error, warning
+from .Lexicon import any_string_prefix, make_lexicon, IDENT
+from .Future import print_function
+
+debug_scanner = 0
+trace_scanner = 0
+scanner_debug_flags = 0
+scanner_dump_file = None
+
+lexicon = None
+
+
+def get_lexicon():
+ global lexicon
+ if not lexicon:
+ lexicon = make_lexicon()
+ return lexicon
+
+
+#------------------------------------------------------------------
+
+py_reserved_words = [
+ "global", "nonlocal", "def", "class", "print", "del", "pass", "break",
+ "continue", "return", "raise", "import", "exec", "try",
+ "except", "finally", "while", "if", "elif", "else", "for",
+ "in", "assert", "and", "or", "not", "is", "lambda",
+ "from", "yield", "with",
+]
+
+pyx_reserved_words = py_reserved_words + [
+ "include", "ctypedef", "cdef", "cpdef",
+ "cimport", "DEF", "IF", "ELIF", "ELSE"
+]
+
+
+class Method(object):
+
+ def __init__(self, name, **kwargs):
+ self.name = name
+ self.kwargs = kwargs or None
+ self.__name__ = name # for Plex tracing
+
+ def __call__(self, stream, text):
+ method = getattr(stream, self.name)
+ # self.kwargs is almost always unused => avoid call overhead
+ return method(text, **self.kwargs) if self.kwargs is not None else method(text)
+
+ def __copy__(self):
+ return self # immutable, no need to copy
+
+ def __deepcopy__(self, memo):
+ return self # immutable, no need to copy
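+# Illustrative dispatch (a sketch): a Method instance defers the attribute
+# lookup to scan time, so the lexicon can be built before any scanner exists.
+# With the strip_underscores() method defined on PyrexScanner below:
+#   action = Method('strip_underscores', symbol='INT')
+#   action(scanner, text)   # == scanner.strip_underscores(text, symbol='INT')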
+
+
+#------------------------------------------------------------------
+
+class CompileTimeScope(object):
+
+ def __init__(self, outer=None):
+ self.entries = {}
+ self.outer = outer
+
+ def declare(self, name, value):
+ self.entries[name] = value
+
+ def update(self, other):
+ self.entries.update(other)
+
+ def lookup_here(self, name):
+ return self.entries[name]
+
+ def __contains__(self, name):
+ return name in self.entries
+
+ def lookup(self, name):
+ try:
+ return self.lookup_here(name)
+ except KeyError:
+ outer = self.outer
+ if outer:
+ return outer.lookup(name)
+ else:
+ raise
+
+
+def initial_compile_time_env():
+ benv = CompileTimeScope()
+ names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE')
+ for name, value in zip(names, platform.uname()):
+ benv.declare(name, value)
+ try:
+ import __builtin__ as builtins
+ except ImportError:
+ import builtins
+
+ names = (
+ 'False', 'True',
+ 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes',
+ 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter',
+ 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len',
+ 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range',
+ 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str',
+ 'sum', 'tuple', 'zip',
+ ### defined below in a platform independent way
+ # 'long', 'unicode', 'reduce', 'xrange'
+ )
+
+ for name in names:
+ try:
+ benv.declare(name, getattr(builtins, name))
+ except AttributeError:
+ # ignore, likely Py3
+ pass
+
+ # Py2/3 adaptations
+ from functools import reduce
+ benv.declare('reduce', reduce)
+ benv.declare('unicode', getattr(builtins, 'unicode', getattr(builtins, 'str')))
+ benv.declare('long', getattr(builtins, 'long', getattr(builtins, 'int')))
+ benv.declare('xrange', getattr(builtins, 'xrange', getattr(builtins, 'range')))
+
+ denv = CompileTimeScope(benv)
+ return denv
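+# Illustrative lookup chain (a sketch): names declared at compile time shadow
+# the builtin scope, and lookup() falls back outward on a miss:
+#   denv = initial_compile_time_env()
+#   denv.declare('N', 3)
+#   denv.lookup('N')       # -> 3, found locally
+#   denv.lookup('range')   # -> Python's range, from the builtin scope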
+
+
+#------------------------------------------------------------------
+
+class SourceDescriptor(object):
+ """
+ A SourceDescriptor should be considered immutable.
+ """
+ filename = None
+
+ _file_type = 'pyx'
+
+ _escaped_description = None
+ _cmp_name = ''
+ def __str__(self):
+ assert False # To catch all places where a descriptor is used directly as a filename
+
+ def set_file_type_from_name(self, filename):
+ name, ext = os.path.splitext(filename)
+        self._file_type = ext[1:] if ext in ('.pyx', '.pxd', '.py') else 'pyx'
+
+ def is_cython_file(self):
+ return self._file_type in ('pyx', 'pxd')
+
+ def is_python_file(self):
+ return self._file_type == 'py'
+
+ def get_escaped_description(self):
+ if self._escaped_description is None:
+ esc_desc = \
+ self.get_description().encode('ASCII', 'replace').decode("ASCII")
+ # Use forward slashes on Windows since these paths
+ # will be used in the #line directives in the C/C++ files.
+ self._escaped_description = esc_desc.replace('\\', '/')
+ return self._escaped_description
+
+ def __gt__(self, other):
+ # this is only used to provide some sort of order
+ try:
+ return self._cmp_name > other._cmp_name
+ except AttributeError:
+ return False
+
+ def __lt__(self, other):
+ # this is only used to provide some sort of order
+ try:
+ return self._cmp_name < other._cmp_name
+ except AttributeError:
+ return False
+
+ def __le__(self, other):
+ # this is only used to provide some sort of order
+ try:
+ return self._cmp_name <= other._cmp_name
+ except AttributeError:
+ return False
+
+ def __copy__(self):
+ return self # immutable, no need to copy
+
+ def __deepcopy__(self, memo):
+ return self # immutable, no need to copy
+
+
+class FileSourceDescriptor(SourceDescriptor):
+ """
+ Represents a code source. A code source is a more generic abstraction
+ for a "filename" (as sometimes the code doesn't come from a file).
+ Instances of code sources are passed to Scanner.__init__ as the
+ optional name argument and will be passed back when asking for
+ the position()-tuple.
+ """
+ def __init__(self, filename, path_description=None):
+ filename = Utils.decode_filename(filename)
+ self.path_description = path_description or filename
+ self.filename = filename
+ # Prefer relative paths to current directory (which is most likely the project root) over absolute paths.
+ workdir = os.path.abspath('.') + os.sep
+ self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename
+ self.set_file_type_from_name(filename)
+ self._cmp_name = filename
+ self._lines = {}
+
+ def get_lines(self, encoding=None, error_handling=None):
+ # we cache the lines only the second time this is called, in
+ # order to save memory when they are only used once
+ key = (encoding, error_handling)
+ try:
+ lines = self._lines[key]
+ if lines is not None:
+ return lines
+ except KeyError:
+ pass
+
+ with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f:
+ lines = list(f)
+
+ if key in self._lines:
+ self._lines[key] = lines
+ else:
+ # do not cache the first access, but remember that we
+ # already read it once
+ self._lines[key] = None
+ return lines
+
+    def get_description(self):
+        # Return path_description as-is: it is already relative to the arcadia
+        # root, which is required for proper file matching in coverage.
+        return self.path_description
+
+ def get_error_description(self):
+ path = self.filename
+ cwd = Utils.decode_filename(os.getcwd() + os.path.sep)
+ if path.startswith(cwd):
+ return path[len(cwd):]
+ return path
+
+ def get_filenametable_entry(self):
+ return self.file_path
+
+ def __eq__(self, other):
+ return isinstance(other, FileSourceDescriptor) and self.filename == other.filename
+
+ def __hash__(self):
+ return hash(self.filename)
+
+ def __repr__(self):
+ return "<FileSourceDescriptor:%s>" % self.filename
+
+
+class StringSourceDescriptor(SourceDescriptor):
+ """
+    Instances of this class can be used instead of a filename if the
+ code originates from a string object.
+ """
+ def __init__(self, name, code):
+ self.name = name
+ #self.set_file_type_from_name(name)
+ self.codelines = [x + "\n" for x in code.split("\n")]
+ self._cmp_name = name
+
+ def get_lines(self, encoding=None, error_handling=None):
+ if not encoding:
+ return self.codelines
+ else:
+ return [line.encode(encoding, error_handling).decode(encoding)
+ for line in self.codelines]
+
+ def get_description(self):
+ return self.name
+
+ get_error_description = get_description
+
+ def get_filenametable_entry(self):
+ return "stringsource"
+
+    def __hash__(self):
+        # Do not hash on the name: an identical string source should be the
+        # same object (the name is often defaulted in other places).
+        # return hash(self.name)
+        return id(self)
+
+ def __eq__(self, other):
+ return isinstance(other, StringSourceDescriptor) and self.name == other.name
+
+ def __repr__(self):
+ return "<StringSourceDescriptor:%s>" % self.name
+
+
+#------------------------------------------------------------------
+
+class PyrexScanner(Scanner):
+ # context Context Compilation context
+ # included_files [string] Files included with 'include' statement
+ # compile_time_env dict Environment for conditional compilation
+ # compile_time_eval boolean In a true conditional compilation context
+ # compile_time_expr boolean In a compile-time expression context
+
+ def __init__(self, file, filename, parent_scanner=None,
+ scope=None, context=None, source_encoding=None, parse_comments=True, initial_pos=None):
+ Scanner.__init__(self, get_lexicon(), file, filename, initial_pos)
+
+ if filename.is_python_file():
+ self.in_python_file = True
+ self.keywords = set(py_reserved_words)
+ else:
+ self.in_python_file = False
+ self.keywords = set(pyx_reserved_words)
+
+ self.async_enabled = 0
+
+ if parent_scanner:
+ self.context = parent_scanner.context
+ self.included_files = parent_scanner.included_files
+ self.compile_time_env = parent_scanner.compile_time_env
+ self.compile_time_eval = parent_scanner.compile_time_eval
+ self.compile_time_expr = parent_scanner.compile_time_expr
+
+ if parent_scanner.async_enabled:
+ self.enter_async()
+ else:
+ self.context = context
+ self.included_files = scope.included_files
+ self.compile_time_env = initial_compile_time_env()
+ self.compile_time_eval = 1
+ self.compile_time_expr = 0
+ if getattr(context.options, 'compile_time_env', None):
+ self.compile_time_env.update(context.options.compile_time_env)
+ self.parse_comments = parse_comments
+ self.source_encoding = source_encoding
+ self.trace = trace_scanner
+ self.indentation_stack = [0]
+ self.indentation_char = None
+ self.bracket_nesting_level = 0
+
+ self.begin('INDENT')
+ self.sy = ''
+ self.next()
+
+ def commentline(self, text):
+ if self.parse_comments:
+ self.produce('commentline', text)
+
+ def strip_underscores(self, text, symbol):
+ self.produce(symbol, text.replace('_', ''))
+
+ def current_level(self):
+ return self.indentation_stack[-1]
+
+ def open_bracket_action(self, text):
+ self.bracket_nesting_level += 1
+ return text
+
+ def close_bracket_action(self, text):
+ self.bracket_nesting_level -= 1
+ return text
+
+ def newline_action(self, text):
+ if self.bracket_nesting_level == 0:
+ self.begin('INDENT')
+ self.produce('NEWLINE', '')
+
+ string_states = {
+ "'": 'SQ_STRING',
+ '"': 'DQ_STRING',
+ "'''": 'TSQ_STRING',
+ '"""': 'TDQ_STRING'
+ }
+
+ def begin_string_action(self, text):
+ while text[:1] in any_string_prefix:
+ text = text[1:]
+ self.begin(self.string_states[text])
+ self.produce('BEGIN_STRING')
+
+ def end_string_action(self, text):
+ self.begin('')
+ self.produce('END_STRING')
+
+ def unclosed_string_action(self, text):
+ self.end_string_action(text)
+ self.error("Unclosed string literal")
+
+ def indentation_action(self, text):
+ self.begin('')
+ # Indentation within brackets should be ignored.
+ #if self.bracket_nesting_level > 0:
+ # return
+ # Check that tabs and spaces are being used consistently.
+ if text:
+ c = text[0]
+ #print "Scanner.indentation_action: indent with", repr(c) ###
+ if self.indentation_char is None:
+ self.indentation_char = c
+ #print "Scanner.indentation_action: setting indent_char to", repr(c)
+ else:
+ if self.indentation_char != c:
+ self.error("Mixed use of tabs and spaces")
+ if text.replace(c, "") != "":
+ self.error("Mixed use of tabs and spaces")
+ # Figure out how many indents/dedents to do
+ current_level = self.current_level()
+ new_level = len(text)
+ #print "Changing indent level from", current_level, "to", new_level ###
+ if new_level == current_level:
+ return
+ elif new_level > current_level:
+ #print "...pushing level", new_level ###
+ self.indentation_stack.append(new_level)
+ self.produce('INDENT', '')
+ else:
+ while new_level < self.current_level():
+ #print "...popping level", self.indentation_stack[-1] ###
+ self.indentation_stack.pop()
+ self.produce('DEDENT', '')
+ #print "...current level now", self.current_level() ###
+ if new_level != self.current_level():
+ self.error("Inconsistent indentation")
+
+ def eof_action(self, text):
+ while len(self.indentation_stack) > 1:
+ self.produce('DEDENT', '')
+ self.indentation_stack.pop()
+ self.produce('EOF', '')
+
+ def next(self):
+ try:
+ sy, systring = self.read()
+ except UnrecognizedInput:
+ self.error("Unrecognized character")
+ return # just a marker, error() always raises
+ if sy == IDENT:
+ if systring in self.keywords:
+ if systring == u'print' and print_function in self.context.future_directives:
+ self.keywords.discard('print')
+ elif systring == u'exec' and self.context.language_level >= 3:
+ self.keywords.discard('exec')
+ else:
+ sy = systring
+ systring = self.context.intern_ustring(systring)
+ self.sy = sy
+ self.systring = systring
+ if False: # debug_scanner:
+ _, line, col = self.position()
+ if not self.systring or self.sy == self.systring:
+ t = self.sy
+ else:
+ t = "%s %s" % (self.sy, self.systring)
+ print("--- %3d %2d %s" % (line, col, t))
+
+ def peek(self):
+ saved = self.sy, self.systring
+ self.next()
+ next = self.sy, self.systring
+ self.unread(*next)
+ self.sy, self.systring = saved
+ return next
+
+ def put_back(self, sy, systring):
+ self.unread(self.sy, self.systring)
+ self.sy = sy
+ self.systring = systring
+
+ def unread(self, token, value):
+ # This method should be added to Plex
+ self.queue.insert(0, (token, value))
+
+ def error(self, message, pos=None, fatal=True):
+ if pos is None:
+ pos = self.position()
+ if self.sy == 'INDENT':
+ error(pos, "Possible inconsistent indentation")
+ err = error(pos, message)
+ if fatal: raise err
+
+ def expect(self, what, message=None):
+ if self.sy == what:
+ self.next()
+ else:
+ self.expected(what, message)
+
+ def expect_keyword(self, what, message=None):
+ if self.sy == IDENT and self.systring == what:
+ self.next()
+ else:
+ self.expected(what, message)
+
+ def expected(self, what, message=None):
+ if message:
+ self.error(message)
+ else:
+ if self.sy == IDENT:
+ found = self.systring
+ else:
+ found = self.sy
+ self.error("Expected '%s', found '%s'" % (what, found))
+
+ def expect_indent(self):
+ self.expect('INDENT', "Expected an increase in indentation level")
+
+ def expect_dedent(self):
+ self.expect('DEDENT', "Expected a decrease in indentation level")
+
+ def expect_newline(self, message="Expected a newline", ignore_semicolon=False):
+ # Expect either a newline or end of file
+ useless_trailing_semicolon = None
+ if ignore_semicolon and self.sy == ';':
+ useless_trailing_semicolon = self.position()
+ self.next()
+ if self.sy != 'EOF':
+ self.expect('NEWLINE', message)
+ if useless_trailing_semicolon is not None:
+ warning(useless_trailing_semicolon, "useless trailing semicolon")
+
+ def enter_async(self):
+ self.async_enabled += 1
+ if self.async_enabled == 1:
+ self.keywords.add('async')
+ self.keywords.add('await')
+
+ def exit_async(self):
+ assert self.async_enabled > 0
+ self.async_enabled -= 1
+ if not self.async_enabled:
+ self.keywords.discard('await')
+ self.keywords.discard('async')
+ if self.sy in ('async', 'await'):
+ self.sy, self.systring = IDENT, self.context.intern_ustring(self.sy)
diff --git a/contrib/tools/cython/Cython/Compiler/StringEncoding.py b/contrib/tools/cython/Cython/Compiler/StringEncoding.py
new file mode 100644
index 0000000000..c37e8aab79
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/StringEncoding.py
@@ -0,0 +1,363 @@
+#
+# Cython -- encoding related tools
+#
+
+from __future__ import absolute_import
+
+import re
+import sys
+
+if sys.version_info[0] >= 3:
+ _unicode, _str, _bytes, _unichr = str, str, bytes, chr
+ IS_PYTHON3 = True
+else:
+ _unicode, _str, _bytes, _unichr = unicode, str, str, unichr
+ IS_PYTHON3 = False
+
+empty_bytes = _bytes()
+empty_unicode = _unicode()
+
+join_bytes = empty_bytes.join
+
+
+class UnicodeLiteralBuilder(object):
+ """Assemble a unicode string.
+ """
+ def __init__(self):
+ self.chars = []
+
+ def append(self, characters):
+ if isinstance(characters, _bytes):
+ # this came from a Py2 string literal in the parser code
+ characters = characters.decode("ASCII")
+ assert isinstance(characters, _unicode), str(type(characters))
+ self.chars.append(characters)
+
+ if sys.maxunicode == 65535:
+ def append_charval(self, char_number):
+ if char_number > 65535:
+ # wide Unicode character on narrow platform => replace
+ # by surrogate pair
+ char_number -= 0x10000
+ self.chars.append( _unichr((char_number // 1024) + 0xD800) )
+ self.chars.append( _unichr((char_number % 1024) + 0xDC00) )
+ else:
+ self.chars.append( _unichr(char_number) )
+ else:
+ def append_charval(self, char_number):
+ self.chars.append( _unichr(char_number) )
+
+ def append_uescape(self, char_number, escape_string):
+ self.append_charval(char_number)
+
+ def getstring(self):
+ return EncodedString(u''.join(self.chars))
+
+ def getstrings(self):
+ return (None, self.getstring())
+
+
+class BytesLiteralBuilder(object):
+ """Assemble a byte string or char value.
+ """
+ def __init__(self, target_encoding):
+ self.chars = []
+ self.target_encoding = target_encoding
+
+ def append(self, characters):
+ if isinstance(characters, _unicode):
+ characters = characters.encode(self.target_encoding)
+ assert isinstance(characters, _bytes), str(type(characters))
+ self.chars.append(characters)
+
+ def append_charval(self, char_number):
+ self.chars.append( _unichr(char_number).encode('ISO-8859-1') )
+
+ def append_uescape(self, char_number, escape_string):
+ self.append(escape_string)
+
+ def getstring(self):
+ # this *must* return a byte string!
+ return bytes_literal(join_bytes(self.chars), self.target_encoding)
+
+ def getchar(self):
+ # this *must* return a byte string!
+ return self.getstring()
+
+ def getstrings(self):
+ return (self.getstring(), None)
+
+
+class StrLiteralBuilder(object):
+ """Assemble both a bytes and a unicode representation of a string.
+ """
+ def __init__(self, target_encoding):
+ self._bytes = BytesLiteralBuilder(target_encoding)
+ self._unicode = UnicodeLiteralBuilder()
+
+ def append(self, characters):
+ self._bytes.append(characters)
+ self._unicode.append(characters)
+
+ def append_charval(self, char_number):
+ self._bytes.append_charval(char_number)
+ self._unicode.append_charval(char_number)
+
+ def append_uescape(self, char_number, escape_string):
+ self._bytes.append(escape_string)
+ self._unicode.append_charval(char_number)
+
+ def getstrings(self):
+ return (self._bytes.getstring(), self._unicode.getstring())
+
+
+class EncodedString(_unicode):
+ # unicode string subclass to keep track of the original encoding.
+ # 'encoding' is None for unicode strings and the source encoding
+ # otherwise
+ encoding = None
+
+ def __deepcopy__(self, memo):
+ return self
+
+ def byteencode(self):
+ assert self.encoding is not None
+ return self.encode(self.encoding)
+
+ def utf8encode(self):
+ assert self.encoding is None
+ return self.encode("UTF-8")
+
+ @property
+ def is_unicode(self):
+ return self.encoding is None
+
+ def contains_surrogates(self):
+ return string_contains_surrogates(self)
+
+ def as_utf8_string(self):
+ return bytes_literal(self.utf8encode(), 'utf8')
+
+
+def string_contains_surrogates(ustring):
+ """
+ Check if the unicode string contains surrogate code points
+ on a CPython platform with wide (UCS-4) or narrow (UTF-16)
+ Unicode, i.e. characters that would be spelled as two
+ separate code units on a narrow platform.
+ """
+ for c in map(ord, ustring):
+ if c > 65535: # can only happen on wide platforms
+ return True
+ if 0xD800 <= c <= 0xDFFF:
+ return True
+ return False
+
+
+def string_contains_lone_surrogates(ustring):
+ """
+ Check if the unicode string contains lone surrogate code points
+ on a CPython platform with wide (UCS-4) or narrow (UTF-16)
+ Unicode, i.e. characters that would be spelled as two
+ separate code units on a narrow platform, but that do not form a pair.
+ """
+ last_was_start = False
+ unicode_uses_surrogate_encoding = sys.maxunicode == 65535
+ for c in map(ord, ustring):
+ # surrogates tend to be rare
+ if c < 0xD800 or c > 0xDFFF:
+ if last_was_start:
+ return True
+ elif not unicode_uses_surrogate_encoding:
+ # on 32bit Unicode platforms, there is never a pair
+ return True
+ elif c <= 0xDBFF:
+ if last_was_start:
+ return True # lone start
+ last_was_start = True
+ else:
+ if not last_was_start:
+ return True # lone end
+ last_was_start = False
+ return last_was_start
+
+
+class BytesLiteral(_bytes):
+ # bytes subclass that is compatible with EncodedString
+ encoding = None
+
+ def __deepcopy__(self, memo):
+ return self
+
+ def byteencode(self):
+ if IS_PYTHON3:
+ return _bytes(self)
+ else:
+ # fake-recode the string to make it a plain bytes object
+ return self.decode('ISO-8859-1').encode('ISO-8859-1')
+
+ def utf8encode(self):
+ assert False, "this is not a unicode string: %r" % self
+
+ def __str__(self):
+ """Fake-decode the byte string to unicode to support %
+ formatting of unicode strings.
+ """
+ return self.decode('ISO-8859-1')
+
+ is_unicode = False
+
+ def as_c_string_literal(self):
+ value = split_string_literal(escape_byte_string(self))
+ return '"%s"' % value
+
+
+def bytes_literal(s, encoding):
+ assert isinstance(s, bytes)
+ s = BytesLiteral(s)
+ s.encoding = encoding
+ return s
+
+
+def encoded_string(s, encoding):
+ assert isinstance(s, (_unicode, bytes))
+ s = EncodedString(s)
+ if encoding is not None:
+ s.encoding = encoding
+ return s
+
+
+char_from_escape_sequence = {
+ r'\a' : u'\a',
+ r'\b' : u'\b',
+ r'\f' : u'\f',
+ r'\n' : u'\n',
+ r'\r' : u'\r',
+ r'\t' : u'\t',
+ r'\v' : u'\v',
+ }.get
+
+_c_special = ('\\', '??', '"') + tuple(map(chr, range(32)))
+
+
+def _to_escape_sequence(s):
+ if s in '\n\r\t':
+ return repr(s)[1:-1]
+ elif s == '"':
+ return r'\"'
+ elif s == '\\':
+ return r'\\'
+ else:
+ # within a character sequence, oct passes much better than hex
+ return ''.join(['\\%03o' % ord(c) for c in s])
+
+
+def _build_specials_replacer():
+ subexps = []
+ replacements = {}
+ for special in _c_special:
+ regexp = ''.join(['[%s]' % c.replace('\\', '\\\\') for c in special])
+ subexps.append(regexp)
+ replacements[special.encode('ASCII')] = _to_escape_sequence(special).encode('ASCII')
+ sub = re.compile(('(%s)' % '|'.join(subexps)).encode('ASCII')).sub
+ def replace_specials(m):
+ return replacements[m.group(1)]
+ def replace(s):
+ return sub(replace_specials, s)
+ return replace
+
+_replace_specials = _build_specials_replacer()
+
+
+def escape_char(c):
+ if IS_PYTHON3:
+ c = c.decode('ISO-8859-1')
+ if c in '\n\r\t\\':
+ return repr(c)[1:-1]
+ elif c == "'":
+ return "\\'"
+ n = ord(c)
+ if n < 32 or n > 127:
+ # hex works well for characters
+ return "\\x%02X" % n
+ else:
+ return c
+
+def escape_byte_string(s):
+ """Escape a byte string so that it can be written into C code.
+    Note that this returns a Unicode string which, when encoded as
+    ISO-8859-1, results in the correct byte sequence being written.
+ """
+ s = _replace_specials(s)
+ try:
+ return s.decode("ASCII") # trial decoding: plain ASCII => done
+ except UnicodeDecodeError:
+ pass
+ if IS_PYTHON3:
+ s_new = bytearray()
+ append, extend = s_new.append, s_new.extend
+ for b in s:
+ if b >= 128:
+ extend(('\\%3o' % b).encode('ASCII'))
+ else:
+ append(b)
+ return s_new.decode('ISO-8859-1')
+ else:
+ l = []
+ append = l.append
+ for c in s:
+ o = ord(c)
+ if o >= 128:
+ append('\\%3o' % o)
+ else:
+ append(c)
+ return join_bytes(l).decode('ISO-8859-1')
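+# Illustrative behaviour (a sketch):
+#   escape_byte_string(b'a"b\xff')  ->  the text  a\"b\377
+# i.e. C specials become backslash escapes and non-ASCII bytes become octal
+# escapes, in a unicode string that round-trips through ISO-8859-1.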
+
+def split_string_literal(s, limit=2000):
+ # MSVC can't handle long string literals.
+ if len(s) < limit:
+ return s
+ else:
+ start = 0
+ chunks = []
+ while start < len(s):
+ end = start + limit
+ if len(s) > end-4 and '\\' in s[end-4:end]:
+ end -= 4 - s[end-4:end].find('\\') # just before the backslash
+ while s[end-1] == '\\':
+ end -= 1
+ if end == start:
+ # must have been a long line of backslashes
+ end = start + limit - (limit % 2) - 4
+ break
+ chunks.append(s[start:end])
+ start = end
+ return '""'.join(chunks)
+
+def encode_pyunicode_string(s):
+ """Create Py_UNICODE[] representation of a given unicode string.
+ """
+ s = list(map(ord, s)) + [0]
+
+ if sys.maxunicode >= 0x10000: # Wide build or Py3.3
+ utf16, utf32 = [], s
+ for code_point in s:
+ if code_point >= 0x10000: # outside of BMP
+ high, low = divmod(code_point - 0x10000, 1024)
+ utf16.append(high + 0xD800)
+ utf16.append(low + 0xDC00)
+ else:
+ utf16.append(code_point)
+ else:
+ utf16, utf32 = s, []
+ for code_unit in s:
+ if 0xDC00 <= code_unit <= 0xDFFF and utf32 and 0xD800 <= utf32[-1] <= 0xDBFF:
+ high, low = utf32[-1], code_unit
+ utf32[-1] = ((high & 0x3FF) << 10) + (low & 0x3FF) + 0x10000
+ else:
+ utf32.append(code_unit)
+
+ if utf16 == utf32:
+ utf16 = []
+ return ",".join(map(_unicode, utf16)), ",".join(map(_unicode, utf32))
diff --git a/contrib/tools/cython/Cython/Compiler/Symtab.py b/contrib/tools/cython/Cython/Compiler/Symtab.py
new file mode 100644
index 0000000000..57d2188cc7
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Symtab.py
@@ -0,0 +1,2552 @@
+#
+# Symbol Table
+#
+
+from __future__ import absolute_import
+
+import re
+import copy
+import operator
+
+try:
+ import __builtin__ as builtins
+except ImportError: # Py3
+ import builtins
+
+from .Errors import warning, error, InternalError
+from .StringEncoding import EncodedString
+from . import Options, Naming
+from . import PyrexTypes
+from .PyrexTypes import py_object_type, unspecified_type
+from .TypeSlots import (
+ pyfunction_signature, pymethod_signature, richcmp_special_methods,
+ get_special_method_signature, get_property_accessor_signature)
+from . import Future
+
+from . import Code
+
+iso_c99_keywords = set(
+['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
+ 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
+ 'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
+ 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
+ 'volatile', 'while',
+ '_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
+
+
+def c_safe_identifier(cname):
+ # There are some C limitations on struct entry names.
+ if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
+ or cname in ('__weakref__', '__dict__')))
+ or cname in iso_c99_keywords):
+ cname = Naming.pyrex_prefix + cname
+ return cname
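+# Illustrative behaviour (a sketch; Naming.pyrex_prefix is assumed to be the
+# usual "__pyx_" prefix):
+#   c_safe_identifier('switch')    -> '__pyx_switch'    (ISO C99 keyword)
+#   c_safe_identifier('__foo')     -> '__pyx___foo'     (reserved '__' prefix)
+#   c_safe_identifier('__dict__')  -> '__dict__'        (explicitly allowed)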
+
+
+class BufferAux(object):
+ writable_needed = False
+
+ def __init__(self, buflocal_nd_var, rcbuf_var):
+ self.buflocal_nd_var = buflocal_nd_var
+ self.rcbuf_var = rcbuf_var
+
+ def __repr__(self):
+ return "<BufferAux %r>" % self.__dict__
+
+
+class Entry(object):
+ # A symbol table entry in a Scope or ModuleNamespace.
+ #
+ # name string Python name of entity
+ # cname string C name of entity
+ # type PyrexType Type of entity
+ # doc string Doc string
+ # annotation ExprNode PEP 484/526 annotation
+ # init string Initial value
+ # visibility 'private' or 'public' or 'extern'
+ # is_builtin boolean Is an entry in the Python builtins dict
+ # is_cglobal boolean Is a C global variable
+ # is_pyglobal boolean Is a Python module-level variable
+ # or class attribute during
+ # class construction
+ # is_member boolean Is an assigned class member
+ # is_pyclass_attr boolean Is a name in a Python class namespace
+ # is_variable boolean Is a variable
+ # is_cfunction boolean Is a C function
+ # is_cmethod boolean Is a C method of an extension type
+ # is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod)
+ # is_unbound_cmethod boolean Is an unbound C method of an extension type
+ # is_final_cmethod boolean Is non-overridable C method
+ # is_inline_cmethod boolean Is inlined C method
+    # is_anonymous boolean Is an anonymous pyfunction entry
+ # is_type boolean Is a type definition
+ # is_cclass boolean Is an extension class
+ # is_cpp_class boolean Is a C++ class
+ # is_const boolean Is a constant
+ # is_property boolean Is a property of an extension type:
+ # doc_cname string or None C const holding the docstring
+ # getter_cname string C func for getting property
+ # setter_cname string C func for setting or deleting property
+ # is_self_arg boolean Is the "self" arg of an exttype method
+ # is_arg boolean Is the arg of a method
+ # is_local boolean Is a local variable
+ # in_closure boolean Is referenced in an inner scope
+ # in_subscope boolean Belongs to a generator expression scope
+ # is_readonly boolean Can't be assigned to
+ # func_cname string C func implementing Python func
+ # func_modifiers [string] C function modifiers ('inline')
+ # pos position Source position where declared
+ # namespace_cname string If is_pyglobal, the C variable
+ # holding its home namespace
+ # pymethdef_cname string PyMethodDef structure
+ # signature Signature Arg & return types for Python func
+ # as_variable Entry Alternative interpretation of extension
+ # type name or builtin C function as a variable
+ # xdecref_cleanup boolean Use Py_XDECREF for error cleanup
+ # in_cinclude boolean Suppress C declaration code
+ # enum_values [Entry] For enum types, list of values
+ # qualified_name string "modname.funcname" or "modname.classname"
+ # or "modname.classname.funcname"
+ # is_declared_generic boolean Is declared as PyObject * even though its
+ # type is an extension type
+ # as_module None Module scope, if a cimported module
+ # is_inherited boolean Is an inherited attribute of an extension type
+ # pystring_cname string C name of Python version of string literal
+ # is_interned boolean For string const entries, value is interned
+ # is_identifier boolean For string const entries, value is an identifier
+ # used boolean
+ # is_special boolean Is a special method or property accessor
+ # of an extension type
+ # defined_in_pxd boolean Is defined in a .pxd file (not just declared)
+ # api boolean Generate C API for C class or function
+ # utility_code string Utility code needed when this entry is used
+ #
+ # buffer_aux BufferAux or None Extra information needed for buffer variables
+ # inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
+ # Ideally this should not be necessary.
+ # might_overflow boolean In an arithmetic expression that could cause
+ # overflow (used for type inference).
+ # utility_code_definition For some Cython builtins, the utility code
+ # which contains the definition of the entry.
+ # Currently only supported for CythonScope entries.
+ # error_on_uninitialized Have Control Flow issue an error when this entry is
+ # used uninitialized
+ # cf_used boolean Entry is used
+ # is_fused_specialized boolean Whether this entry of a cdef or def function
+ # is a specialization
+
+    # TODO: utility_code and utility_code_definition serve the same purpose...
+
+ inline_func_in_pxd = False
+ borrowed = 0
+ init = ""
+ annotation = None
+ visibility = 'private'
+ is_builtin = 0
+ is_cglobal = 0
+ is_pyglobal = 0
+ is_member = 0
+ is_pyclass_attr = 0
+ is_variable = 0
+ is_cfunction = 0
+ is_cmethod = 0
+ is_builtin_cmethod = False
+ is_unbound_cmethod = 0
+ is_final_cmethod = 0
+ is_inline_cmethod = 0
+ is_anonymous = 0
+ is_type = 0
+ is_cclass = 0
+ is_cpp_class = 0
+ is_const = 0
+ is_property = 0
+ doc_cname = None
+ getter_cname = None
+ setter_cname = None
+ is_self_arg = 0
+ is_arg = 0
+ is_local = 0
+ in_closure = 0
+ from_closure = 0
+ in_subscope = 0
+ is_declared_generic = 0
+ is_readonly = 0
+ pyfunc_cname = None
+ func_cname = None
+ func_modifiers = []
+ final_func_cname = None
+ doc = None
+ as_variable = None
+ xdecref_cleanup = 0
+ in_cinclude = 0
+ as_module = None
+ is_inherited = 0
+ pystring_cname = None
+ is_identifier = 0
+ is_interned = 0
+ used = 0
+ is_special = 0
+ defined_in_pxd = 0
+ is_implemented = 0
+ api = 0
+ utility_code = None
+ is_overridable = 0
+ buffer_aux = None
+ prev_entry = None
+ might_overflow = 0
+ fused_cfunction = None
+ is_fused_specialized = False
+ utility_code_definition = None
+ needs_property = False
+ in_with_gil_block = 0
+ from_cython_utility_code = None
+ error_on_uninitialized = False
+ cf_used = True
+ outer_entry = None
+
+ def __init__(self, name, cname, type, pos = None, init = None):
+ self.name = name
+ self.cname = cname
+ self.type = type
+ self.pos = pos
+ self.init = init
+ self.overloaded_alternatives = []
+ self.cf_assignments = []
+ self.cf_references = []
+ self.inner_entries = []
+ self.defining_entry = self
+
+ def __repr__(self):
+ return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
+
+ def already_declared_here(self):
+ error(self.pos, "Previous declaration is here")
+
+ def redeclared(self, pos):
+ error(pos, "'%s' does not match previous declaration" % self.name)
+ self.already_declared_here()
+
+ def all_alternatives(self):
+ return [self] + self.overloaded_alternatives
+
+ def all_entries(self):
+ return [self] + self.inner_entries
+
+ def __lt__(left, right):
+ if isinstance(left, Entry) and isinstance(right, Entry):
+ return (left.name, left.cname) < (right.name, right.cname)
+ else:
+ return NotImplemented
+
+
+class InnerEntry(Entry):
+ """
+ An entry in a closure scope that represents the real outer Entry.
+ """
+ from_closure = True
+
+ def __init__(self, outer_entry, scope):
+ Entry.__init__(self, outer_entry.name,
+ outer_entry.cname,
+ outer_entry.type,
+ outer_entry.pos)
+ self.outer_entry = outer_entry
+ self.scope = scope
+
+ # share state with (outermost) defining entry
+ outermost_entry = outer_entry
+ while outermost_entry.outer_entry:
+ outermost_entry = outermost_entry.outer_entry
+ self.defining_entry = outermost_entry
+ self.inner_entries = outermost_entry.inner_entries
+ self.cf_assignments = outermost_entry.cf_assignments
+ self.cf_references = outermost_entry.cf_references
+ self.overloaded_alternatives = outermost_entry.overloaded_alternatives
+ self.inner_entries.append(self)
+
+ def __getattr__(self, name):
+ if name.startswith('__'):
+ # we wouldn't have been called if it was there
+ raise AttributeError(name)
+ return getattr(self.defining_entry, name)
+
+ def all_entries(self):
+ return self.defining_entry.all_entries()
+
+
+class Scope(object):
+ # name string Unqualified name
+ # outer_scope Scope or None Enclosing scope
+ # entries {string : Entry} Python name to entry, non-types
+ # const_entries [Entry] Constant entries
+ # type_entries [Entry] Struct/union/enum/typedef/exttype entries
+ # sue_entries [Entry] Struct/union/enum entries
+ # arg_entries [Entry] Function argument entries
+ # var_entries [Entry] User-defined variable entries
+ # pyfunc_entries [Entry] Python function entries
+ # cfunc_entries [Entry] C function entries
+ # c_class_entries [Entry] All extension type entries
+ # cname_to_entry {string : Entry} Temp cname to entry mapping
+ # return_type PyrexType or None Return type of function owning scope
+ # is_builtin_scope boolean Is the builtin scope of Python/Cython
+ # is_py_class_scope boolean Is a Python class scope
+ # is_c_class_scope boolean Is an extension type scope
+ # is_closure_scope boolean Is a closure scope
+ # is_passthrough boolean Outer scope is passed directly
+ # is_cpp_class_scope boolean Is a C++ class scope
+    # is_property_scope boolean Is an extension type property scope
+ # scope_prefix string Disambiguator for C names
+ # in_cinclude boolean Suppress C declaration code
+ # qualified_name string "modname" or "modname.classname"
+ # Python strings in this scope
+ # nogil boolean In a nogil section
+ # directives dict Helper variable for the recursive
+ # analysis, contains directive values.
+ # is_internal boolean Is only used internally (simpler setup)
+
+ is_builtin_scope = 0
+ is_py_class_scope = 0
+ is_c_class_scope = 0
+ is_closure_scope = 0
+ is_genexpr_scope = 0
+ is_passthrough = 0
+ is_cpp_class_scope = 0
+ is_property_scope = 0
+ is_module_scope = 0
+ is_internal = 0
+ scope_prefix = ""
+ in_cinclude = 0
+ nogil = 0
+ fused_to_specific = None
+ return_type = None
+
+ def __init__(self, name, outer_scope, parent_scope):
+ # The outer_scope is the next scope in the lookup chain.
+ # The parent_scope is used to derive the qualified name of this scope.
+ self.name = name
+ self.outer_scope = outer_scope
+ self.parent_scope = parent_scope
+ mangled_name = "%d%s_" % (len(name), name.replace('.', '_dot_'))
+ qual_scope = self.qualifying_scope()
+ if qual_scope:
+ self.qualified_name = qual_scope.qualify_name(name)
+ self.scope_prefix = qual_scope.scope_prefix + mangled_name
+ else:
+ self.qualified_name = EncodedString(name)
+ self.scope_prefix = mangled_name
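+ # e.g. a class "Spam" in module "mod" gets scope_prefix "3mod_4Spam_"
+ # ("%d%s_" per nesting level), which keeps generated C names
+ # collision-free even when the same Python name recurs across scopes.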
+ self.entries = {}
+ self.subscopes = set()
+ self.const_entries = []
+ self.type_entries = []
+ self.sue_entries = []
+ self.arg_entries = []
+ self.var_entries = []
+ self.pyfunc_entries = []
+ self.cfunc_entries = []
+ self.c_class_entries = []
+ self.defined_c_classes = []
+ self.imported_c_classes = {}
+ self.cname_to_entry = {}
+ self.string_to_entry = {}
+ self.identifier_to_entry = {}
+ self.num_to_entry = {}
+ self.obj_to_entry = {}
+ self.buffer_entries = []
+ self.lambda_defs = []
+ self.id_counters = {}
+
+ def __deepcopy__(self, memo):
+ return self
+
+ def merge_in(self, other, merge_unused=True, whitelist=None):
+ # Use with care...
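+ # Copies entries (and the per-kind entry lists) from 'other' into this
+ # scope; 'whitelist' restricts the copy by name, and merge_unused=False
+ # skips entries that were never marked as used.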
+ entries = []
+ for name, entry in other.entries.items():
+ if not whitelist or name in whitelist:
+ if entry.used or merge_unused:
+ entries.append((name, entry))
+
+ self.entries.update(entries)
+
+ for attr in ('const_entries',
+ 'type_entries',
+ 'sue_entries',
+ 'arg_entries',
+ 'var_entries',
+ 'pyfunc_entries',
+ 'cfunc_entries',
+ 'c_class_entries'):
+ self_entries = getattr(self, attr)
+ names = set(e.name for e in self_entries)
+ for entry in getattr(other, attr):
+ if (entry.used or merge_unused) and entry.name not in names:
+ self_entries.append(entry)
+
+ def __str__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
+
+ def qualifying_scope(self):
+ return self.parent_scope
+
+ def mangle(self, prefix, name = None):
+ if name:
+ return "%s%s%s" % (prefix, self.scope_prefix, name)
+ else:
+ return self.parent_scope.mangle(prefix, self.name)
+
+ def mangle_internal(self, name):
+ # Mangle an internal name so as not to clash with any
+ # user-defined name in this scope.
+ prefix = "%s%s_" % (Naming.pyrex_prefix, name)
+ return self.mangle(prefix)
+ #return self.parent_scope.mangle(prefix, self.name)
+
+ def mangle_class_private_name(self, name):
+ if self.parent_scope:
+ return self.parent_scope.mangle_class_private_name(name)
+ return name
+
+ def next_id(self, name=None):
+ # Return a cname fragment that is unique for this module
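+ # e.g. successive calls next_id('tmp') yield 'tmp', 'tmp1', 'tmp2', ...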
+ counters = self.global_scope().id_counters
+ try:
+ count = counters[name] + 1
+ except KeyError:
+ count = 0
+ counters[name] = count
+ if name:
+ if not count:
+ # unique names don't need a suffix, repeated occurrences will get one
+ return name
+ return '%s%d' % (name, count)
+ else:
+ return '%d' % count
+
+ def global_scope(self):
+ """ Return the module-level scope containing this scope. """
+ return self.outer_scope.global_scope()
+
+ def builtin_scope(self):
+ """ Return the module-level scope containing this scope. """
+ return self.outer_scope.builtin_scope()
+
+ def iter_local_scopes(self):
+ yield self
+ if self.subscopes:
+ for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')):
+ yield scope
+
+ def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0):
+ # Create new entry, and add to dictionary if
+ # name is not None. Reports a warning or error if
+ # already declared.
+ if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
+ error(pos, 'Buffer types only allowed as function local variables')
+ if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
+ # See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
+ warning(pos, "'%s' is a reserved name in C." % cname, -1)
+ entries = self.entries
+ if name and name in entries and not shadow:
+ old_entry = entries[name]
+
+ # Reject redeclared C++ functions only if they have the same type signature.
+ cpp_override_allowed = False
+ if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
+ for alt_entry in old_entry.all_alternatives():
+ if type == alt_entry.type:
+ if name == '<init>' and not type.args:
+ # Cython pre-declares the no-args constructor - allow later user definitions.
+ cpp_override_allowed = True
+ break
+ else:
+ cpp_override_allowed = True
+
+ if cpp_override_allowed:
+ # C++ function/method overrides with different signatures are ok.
+ pass
+ elif self.is_cpp_class_scope and entries[name].is_inherited:
+ # Likewise ignore inherited classes.
+ pass
+ elif visibility == 'extern':
+ # Silenced outside of "cdef extern" blocks, until we have a safe way to
+ # prevent pxd-defined cpdef functions from ending up here.
+ warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
+ elif visibility != 'ignore':
+ error(pos, "'%s' redeclared " % name)
+ entries[name].already_declared_here()
+ entry = Entry(name, cname, type, pos = pos)
+ entry.in_cinclude = self.in_cinclude
+ entry.create_wrapper = create_wrapper
+ if name:
+ entry.qualified_name = self.qualify_name(name)
+# if name in entries and self.is_cpp():
+# entries[name].overloaded_alternatives.append(entry)
+# else:
+# entries[name] = entry
+ if not shadow:
+ entries[name] = entry
+
+ if type.is_memoryviewslice:
+ from . import MemoryView
+ entry.init = MemoryView.memslice_entry_init
+
+ entry.scope = self
+ entry.visibility = visibility
+ return entry
+
+ def qualify_name(self, name):
+ return EncodedString("%s.%s" % (self.qualified_name, name))
+
+ def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0, create_wrapper = 0):
+ # Add an entry for a named constant.
+ if not cname:
+ if self.in_cinclude or (visibility == 'public' or api):
+ cname = name
+ else:
+ cname = self.mangle(Naming.enum_prefix, name)
+ entry = self.declare(name, cname, type, pos, visibility, create_wrapper = create_wrapper)
+ entry.is_const = 1
+ entry.value_node = value
+ return entry
+
+ def declare_type(self, name, type, pos,
+ cname = None, visibility = 'private', api = 0, defining = 1,
+ shadow = 0, template = 0):
+ # Add an entry for a type definition.
+ if not cname:
+ cname = name
+ entry = self.declare(name, cname, type, pos, visibility, shadow,
+ is_type=True)
+ entry.is_type = 1
+ entry.api = api
+ if defining:
+ self.type_entries.append(entry)
+
+ if not template:
+ type.entry = entry
+
+ # here we would set as_variable to an object representing this type
+ return entry
+
+ def declare_typedef(self, name, base_type, pos, cname = None,
+ visibility = 'private', api = 0):
+ if not cname:
+ if self.in_cinclude or (visibility != 'private' or api):
+ cname = name
+ else:
+ cname = self.mangle(Naming.type_prefix, name)
+ try:
+ if self.is_cpp_class_scope:
+ namespace = self.outer_scope.lookup(self.name).type
+ else:
+ namespace = None
+ type = PyrexTypes.create_typedef_type(name, base_type, cname,
+ (visibility == 'extern'),
+ namespace)
+ except ValueError as e:
+ error(pos, e.args[0])
+ type = PyrexTypes.error_type
+ entry = self.declare_type(name, type, pos, cname,
+ visibility = visibility, api = api)
+ type.qualified_name = entry.qualified_name
+ return entry
+
+ def declare_struct_or_union(self, name, kind, scope,
+ typedef_flag, pos, cname = None,
+ visibility = 'private', api = 0,
+ packed = False):
+ # Add an entry for a struct or union definition.
+ if not cname:
+ if self.in_cinclude or (visibility == 'public' or api):
+ cname = name
+ else:
+ cname = self.mangle(Naming.type_prefix, name)
+ entry = self.lookup_here(name)
+ if not entry:
+ type = PyrexTypes.CStructOrUnionType(
+ name, kind, scope, typedef_flag, cname, packed)
+ entry = self.declare_type(name, type, pos, cname,
+ visibility = visibility, api = api,
+ defining = scope is not None)
+ self.sue_entries.append(entry)
+ type.entry = entry
+ else:
+ if not (entry.is_type and entry.type.is_struct_or_union
+ and entry.type.kind == kind):
+ warning(pos, "'%s' redeclared " % name, 0)
+ elif scope and entry.type.scope:
+ warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
+ else:
+ self.check_previous_typedef_flag(entry, typedef_flag, pos)
+ self.check_previous_visibility(entry, visibility, pos)
+ if scope:
+ entry.type.scope = scope
+ self.type_entries.append(entry)
+ if self.is_cpp_class_scope:
+ entry.type.namespace = self.outer_scope.lookup(self.name).type
+ return entry
+
+ def declare_cpp_class(self, name, scope,
+ pos, cname = None, base_classes = (),
+ visibility = 'extern', templates = None):
+ if cname is None:
+ if self.in_cinclude or (visibility != 'private'):
+ cname = name
+ else:
+ cname = self.mangle(Naming.type_prefix, name)
+ base_classes = list(base_classes)
+ entry = self.lookup_here(name)
+ if not entry:
+ type = PyrexTypes.CppClassType(
+ name, scope, cname, base_classes, templates = templates)
+ entry = self.declare_type(name, type, pos, cname,
+ visibility = visibility, defining = scope is not None)
+ self.sue_entries.append(entry)
+ else:
+ if not (entry.is_type and entry.type.is_cpp_class):
+ error(pos, "'%s' redeclared " % name)
+ entry.already_declared_here()
+ return None
+ elif scope and entry.type.scope:
+ warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
+ else:
+ if scope:
+ entry.type.scope = scope
+ self.type_entries.append(entry)
+ if base_classes:
+ if entry.type.base_classes and entry.type.base_classes != base_classes:
+ error(pos, "Base type does not match previous declaration")
+ entry.already_declared_here()
+ else:
+ entry.type.base_classes = base_classes
+ if templates or entry.type.templates:
+ if templates != entry.type.templates:
+ error(pos, "Template parameters do not match previous declaration")
+ entry.already_declared_here()
+
+ def declare_inherited_attributes(entry, base_classes):
+ for base_class in base_classes:
+ if base_class is PyrexTypes.error_type:
+ continue
+ if base_class.scope is None:
+ error(pos, "Cannot inherit from incomplete type")
+ else:
+ declare_inherited_attributes(entry, base_class.base_classes)
+ entry.type.scope.declare_inherited_cpp_attributes(base_class)
+ if scope:
+ declare_inherited_attributes(entry, base_classes)
+ scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
+ if self.is_cpp_class_scope:
+ entry.type.namespace = self.outer_scope.lookup(self.name).type
+ return entry
+
+ def check_previous_typedef_flag(self, entry, typedef_flag, pos):
+ if typedef_flag != entry.type.typedef_flag:
+ error(pos, "'%s' previously declared using '%s'" % (
+ entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
+
+ def check_previous_visibility(self, entry, visibility, pos):
+ if entry.visibility != visibility:
+ error(pos, "'%s' previously declared as '%s'" % (
+ entry.name, entry.visibility))
+
+ def declare_enum(self, name, pos, cname, typedef_flag,
+ visibility = 'private', api = 0, create_wrapper = 0):
+ if name:
+ if not cname:
+ if (self.in_cinclude or visibility == 'public'
+ or visibility == 'extern' or api):
+ cname = name
+ else:
+ cname = self.mangle(Naming.type_prefix, name)
+ if self.is_cpp_class_scope:
+ namespace = self.outer_scope.lookup(self.name).type
+ else:
+ namespace = None
+ type = PyrexTypes.CEnumType(name, cname, typedef_flag, namespace)
+ else:
+ type = PyrexTypes.c_anon_enum_type
+ entry = self.declare_type(name, type, pos, cname = cname,
+ visibility = visibility, api = api)
+ entry.create_wrapper = create_wrapper
+ entry.enum_values = []
+ self.sue_entries.append(entry)
+ return entry
+
+ def declare_tuple_type(self, pos, components):
+ return self.outer_scope.declare_tuple_type(pos, components)
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0):
+ # Add an entry for a variable.
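+ # Private variables get cnames mangled with Naming.var_prefix plus the
+ # scope prefix; public/api entries keep the plain name so that external
+ # C code can reference them.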
+ if not cname:
+ if visibility != 'private' or api:
+ cname = name
+ else:
+ cname = self.mangle(Naming.var_prefix, name)
+ if type.is_cpp_class and visibility != 'extern':
+ type.check_nullary_constructor(pos)
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_variable = 1
+ if in_pxd and visibility != 'extern':
+ entry.defined_in_pxd = 1
+ entry.used = 1
+ if api:
+ entry.api = 1
+ entry.used = 1
+ return entry
+
+ def declare_builtin(self, name, pos):
+ return self.outer_scope.declare_builtin(name, pos)
+
+ def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
+ if entry and not entry.type.is_cfunction:
+ error(pos, "'%s' already declared" % name)
+ error(entry.pos, "Previous declaration is here")
+ entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
+ entry.signature = pyfunction_signature
+ self.pyfunc_entries.append(entry)
+ return entry
+
+ def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
+ # Add an entry for a Python function.
+ entry = self.lookup_here(name)
+ if not allow_redefine:
+ return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
+ if entry:
+ if entry.type.is_unspecified:
+ entry.type = py_object_type
+ elif entry.type is not py_object_type:
+ return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
+ else: # declare entry stub
+ self.declare_var(name, py_object_type, pos, visibility=visibility)
+ entry = self.declare_var(None, py_object_type, pos,
+ cname=name, visibility='private')
+ entry.name = EncodedString(name)
+ entry.qualified_name = self.qualify_name(name)
+ entry.signature = pyfunction_signature
+ entry.is_anonymous = True
+ return entry
+
+ def declare_lambda_function(self, lambda_name, pos):
+ # Add an entry for an anonymous Python function.
+ func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
+ pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
+ qualified_name = self.qualify_name(lambda_name)
+
+ entry = self.declare(None, func_cname, py_object_type, pos, 'private')
+ entry.name = lambda_name
+ entry.qualified_name = qualified_name
+ entry.pymethdef_cname = pymethdef_cname
+ entry.func_cname = func_cname
+ entry.signature = pyfunction_signature
+ entry.is_anonymous = True
+ return entry
+
+ def add_lambda_def(self, def_node):
+ self.lambda_defs.append(def_node)
+
+ def register_pyfunction(self, entry):
+ self.pyfunc_entries.append(entry)
+
+ def declare_cfunction(self, name, type, pos,
+ cname=None, visibility='private', api=0, in_pxd=0,
+ defining=0, modifiers=(), utility_code=None, overridable=False):
+ # Add an entry for a C function.
+ if not cname:
+ if visibility != 'private' or api:
+ cname = name
+ else:
+ cname = self.mangle(Naming.func_prefix, name)
+ entry = self.lookup_here(name)
+ if entry:
+ if not in_pxd and visibility != entry.visibility and visibility == 'extern':
+ # Previously declared, but now extern => treat this
+ # as implementing the function, using the new cname
+ defining = True
+ visibility = entry.visibility
+ entry.cname = cname
+ entry.func_cname = cname
+ if visibility != 'private' and visibility != entry.visibility:
+ warning(pos, "Function '%s' previously declared as '%s', now as '%s'" % (name, entry.visibility, visibility), 1)
+ if overridable != entry.is_overridable:
+ warning(pos, "Function '%s' previously declared as '%s'" % (
+ name, 'cpdef' if overridable else 'cdef'), 1)
+ if entry.type.same_as(type):
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
+ else:
+ if visibility == 'extern' and entry.visibility == 'extern':
+ can_override = False
+ if self.is_cpp():
+ can_override = True
+ elif cname:
+ # if all alternatives have different cnames,
+ # it's safe to allow signature overrides
+ for alt_entry in entry.all_alternatives():
+ if not alt_entry.cname or cname == alt_entry.cname:
+ break # cname not unique!
+ else:
+ can_override = True
+ if can_override:
+ temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
+ temp.overloaded_alternatives = entry.all_alternatives()
+ entry = temp
+ else:
+ warning(pos, "Function signature does not match previous declaration", 1)
+ entry.type = type
+ elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
+ # TODO: check that this was done by a signature optimisation and not a user error.
+ #warning(pos, "Function signature does not match previous declaration", 1)
+ entry.type = type
+ else:
+ error(pos, "Function signature does not match previous declaration")
+ else:
+ entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
+ entry.func_cname = cname
+ entry.is_overridable = overridable
+ if in_pxd and visibility != 'extern':
+ entry.defined_in_pxd = 1
+ if api:
+ entry.api = 1
+ if not defining and not in_pxd and visibility != 'extern':
+ error(pos, "Non-extern C function '%s' declared but not defined" % name)
+ if defining:
+ entry.is_implemented = True
+ if modifiers:
+ entry.func_modifiers = modifiers
+ if utility_code:
+ assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
+ entry.utility_code = utility_code
+ if overridable:
+ # names of cpdef functions can be used as variables and can be assigned to
+ var_entry = Entry(name, cname, py_object_type) # FIXME: cname?
+ var_entry.qualified_name = self.qualify_name(name)
+ var_entry.is_variable = 1
+ var_entry.is_pyglobal = 1
+ var_entry.scope = entry.scope
+ entry.as_variable = var_entry
+ type.entry = entry
+ return entry
+
+ def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
+ # Add a C function entry without giving it a func_cname.
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_cfunction = 1
+ if modifiers:
+ entry.func_modifiers = modifiers
+ if inherited or type.is_fused:
+ self.cfunc_entries.append(entry)
+ else:
+ # For backwards compatibility reasons, we must keep all non-fused methods
+ # before all fused methods, but separately for each type.
+ i = len(self.cfunc_entries)
+ for cfunc_entry in reversed(self.cfunc_entries):
+ if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
+ break
+ i -= 1
+ self.cfunc_entries.insert(i, entry)
+ return entry
+
+ def find(self, name, pos):
+ # Look up name, report error if not found.
+ entry = self.lookup(name)
+ if entry:
+ return entry
+ else:
+ error(pos, "'%s' is not declared" % name)
+
+ def find_imported_module(self, path, pos):
+ # Look up qualified name, must be a module, report error if not found.
+ # Path is a list of names.
+ scope = self
+ for name in path:
+ entry = scope.find(name, pos)
+ if not entry:
+ return None
+ if entry.as_module:
+ scope = entry.as_module
+ else:
+ error(pos, "'%s' is not a cimported module" % '.'.join(path))
+ return None
+ return scope
+
+ def lookup(self, name):
+ # Look up name in this scope or an enclosing one.
+ # Return None if not found.
+ return (self.lookup_here(name)
+ or (self.outer_scope and self.outer_scope.lookup(name))
+ or None)
+
+ def lookup_here(self, name):
+ # Look up in this scope only, return None if not found.
+ return self.entries.get(name, None)
+
+ def lookup_target(self, name):
+ # Look up name in this scope only. Declare as Python
+ # variable if not found.
+ entry = self.lookup_here(name)
+ if not entry:
+ entry = self.declare_var(name, py_object_type, None)
+ return entry
+
+ def lookup_type(self, name):
+ entry = self.lookup(name)
+ if entry and entry.is_type:
+ if entry.type.is_fused and self.fused_to_specific:
+ return entry.type.specialize(self.fused_to_specific)
+ return entry.type
+
+ def lookup_operator(self, operator, operands):
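+ # Resolve an overloaded C++ operator, e.g. "operator+" for 'a + b':
+ # first as a member of the first operand's class, then as a free
+ # function, then as a nonmember declared inside either operand's class
+ # scope; the best match over all collected alternatives wins.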
+ if operands[0].type.is_cpp_class:
+ obj_type = operands[0].type
+ method = obj_type.scope.lookup("operator%s" % operator)
+ if method is not None:
+ arg_types = [arg.type for arg in operands[1:]]
+ res = PyrexTypes.best_match(arg_types,
+ method.all_alternatives())
+ if res is not None:
+ return res
+ function = self.lookup("operator%s" % operator)
+ function_alternatives = []
+ if function is not None:
+ function_alternatives = function.all_alternatives()
+
+ # look-up nonmember methods listed within a class
+ method_alternatives = []
+ if len(operands) == 2: # binary operators only
+ for n in range(2):
+ if operands[n].type.is_cpp_class:
+ obj_type = operands[n].type
+ method = obj_type.scope.lookup("operator%s" % operator)
+ if method is not None:
+ method_alternatives += method.all_alternatives()
+
+ if (not method_alternatives) and (not function_alternatives):
+ return None
+
+ # select the unique alternatives
+ all_alternatives = list(set(method_alternatives + function_alternatives))
+
+ return PyrexTypes.best_match([arg.type for arg in operands],
+ all_alternatives)
+
+ def lookup_operator_for_types(self, pos, operator, types):
+ from .Nodes import Node
+ class FakeOperand(Node):
+ pass
+ operands = [FakeOperand(pos, type=type) for type in types]
+ return self.lookup_operator(operator, operands)
+
+ def use_utility_code(self, new_code):
+ self.global_scope().use_utility_code(new_code)
+
+ def use_entry_utility_code(self, entry):
+ self.global_scope().use_entry_utility_code(entry)
+
+ def defines_any(self, names):
+ # Test whether any of the given names are defined in this scope.
+ for name in names:
+ if name in self.entries:
+ return 1
+ return 0
+
+ def defines_any_special(self, names):
+ # Test whether any of the given names are defined as special methods in this scope.
+ for name in names:
+ if name in self.entries and self.entries[name].is_special:
+ return 1
+ return 0
+
+ def infer_types(self):
+ from .TypeInference import get_type_inferer
+ get_type_inferer().infer_types(self)
+
+ def is_cpp(self):
+ outer = self.outer_scope
+ if outer is None:
+ return False
+ else:
+ return outer.is_cpp()
+
+ def add_include_file(self, filename, verbatim_include=None, late=False):
+ self.outer_scope.add_include_file(filename, verbatim_include, late)
+
+
+class PreImportScope(Scope):
+
+ namespace_cname = Naming.preimport_cname
+
+ def __init__(self):
+ Scope.__init__(self, Options.pre_import, None, None)
+
+ def declare_builtin(self, name, pos):
+ entry = self.declare(name, name, py_object_type, pos, 'private')
+ entry.is_variable = True
+ entry.is_pyglobal = True
+ return entry
+
+
+class BuiltinScope(Scope):
+ # The builtin namespace.
+
+ is_builtin_scope = True
+
+ def __init__(self):
+ if Options.pre_import is None:
+ Scope.__init__(self, "__builtin__", None, None)
+ else:
+ Scope.__init__(self, "__builtin__", PreImportScope(), None)
+ self.type_names = {}
+
+ for name, definition in sorted(self.builtin_entries.items()):
+ cname, type = definition
+ self.declare_var(name, type, None, cname)
+
+ def lookup(self, name, language_level=None, str_is_str=None):
+ # 'language_level' and 'str_is_str' are passed by ModuleScope
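+ # Under Python 3 semantics (str_is_str False), a lookup of 'str' is
+ # redirected to the 'unicode' builtin entry.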
+ if name == 'str':
+ if str_is_str is None:
+ str_is_str = language_level in (None, 2)
+ if not str_is_str:
+ name = 'unicode'
+ return Scope.lookup(self, name)
+
+ def declare_builtin(self, name, pos):
+ if not hasattr(builtins, name):
+ if self.outer_scope is not None:
+ return self.outer_scope.declare_builtin(name, pos)
+ else:
+ if Options.error_on_unknown_names:
+ error(pos, "undeclared name not builtin: %s" % name)
+ else:
+ warning(pos, "undeclared name not builtin: %s" % name, 2)
+
+ def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None):
+ # If python_equiv == "*", the Python equivalent has the same name
+ # as the entry, otherwise it has the name specified by python_equiv.
+ name = EncodedString(name)
+ entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
+ utility_code=utility_code)
+ if python_equiv:
+ if python_equiv == "*":
+ python_equiv = name
+ else:
+ python_equiv = EncodedString(python_equiv)
+ var_entry = Entry(python_equiv, python_equiv, py_object_type)
+ var_entry.qualified_name = self.qualify_name(name)
+ var_entry.is_variable = 1
+ var_entry.is_builtin = 1
+ var_entry.utility_code = utility_code
+ var_entry.scope = entry.scope
+ entry.as_variable = var_entry
+ return entry
+
+ def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
+ name = EncodedString(name)
+ type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
+ scope = CClassScope(name, outer_scope=None, visibility='extern')
+ scope.directives = {}
+ if name == 'bool':
+ type.is_final_type = True
+ type.set_scope(scope)
+ self.type_names[name] = 1
+ entry = self.declare_type(name, type, None, visibility='extern')
+ entry.utility_code = utility_code
+
+ var_entry = Entry(name = entry.name,
+ type = self.lookup('type').type, # make sure "type" is the first type declared...
+ pos = entry.pos,
+ cname = entry.type.typeptr_cname)
+ var_entry.qualified_name = self.qualify_name(name)
+ var_entry.is_variable = 1
+ var_entry.is_cglobal = 1
+ var_entry.is_readonly = 1
+ var_entry.is_builtin = 1
+ var_entry.utility_code = utility_code
+ var_entry.scope = self
+ if Options.cache_builtins:
+ var_entry.is_const = True
+ entry.as_variable = var_entry
+
+ return type
+
+ def builtin_scope(self):
+ return self
+
+ builtin_entries = {
+
+ "type": ["((PyObject*)&PyType_Type)", py_object_type],
+
+ "bool": ["((PyObject*)&PyBool_Type)", py_object_type],
+ "int": ["((PyObject*)&PyInt_Type)", py_object_type],
+ "long": ["((PyObject*)&PyLong_Type)", py_object_type],
+ "float": ["((PyObject*)&PyFloat_Type)", py_object_type],
+ "complex":["((PyObject*)&PyComplex_Type)", py_object_type],
+
+ "bytes": ["((PyObject*)&PyBytes_Type)", py_object_type],
+ "bytearray": ["((PyObject*)&PyByteArray_Type)", py_object_type],
+ "str": ["((PyObject*)&PyString_Type)", py_object_type],
+ "unicode":["((PyObject*)&PyUnicode_Type)", py_object_type],
+
+ "tuple": ["((PyObject*)&PyTuple_Type)", py_object_type],
+ "list": ["((PyObject*)&PyList_Type)", py_object_type],
+ "dict": ["((PyObject*)&PyDict_Type)", py_object_type],
+ "set": ["((PyObject*)&PySet_Type)", py_object_type],
+ "frozenset": ["((PyObject*)&PyFrozenSet_Type)", py_object_type],
+
+ "slice": ["((PyObject*)&PySlice_Type)", py_object_type],
+# "file": ["((PyObject*)&PyFile_Type)", py_object_type], # not in Py3
+
+ "None": ["Py_None", py_object_type],
+ "False": ["Py_False", py_object_type],
+ "True": ["Py_True", py_object_type],
+ }
+
+const_counter = 1 # As a temporary solution for compiling code in pxds
+
+class ModuleScope(Scope):
+ # module_name string Python name of the module
+ # module_cname string C name of Python module object
+ # #module_dict_cname string C name of module dict object
+ # method_table_cname string C name of method table
+ # doc string Module doc string
+ # doc_cname string C name of module doc string
+ # utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
+ # c_includes {key: IncludeCode} C headers or verbatim code to be generated
+ # See process_include() for more documentation
+ # string_to_entry {string : Entry} Map string const to entry
+ # identifier_to_entry {string : Entry} Map identifier string const to entry
+ # context Context
+ # parent_module Scope Parent in the import namespace
+ # module_entries {string : Entry} For cimport statements
+ # type_names {string : 1} Set of type names (used during parsing)
+ # included_files [string] Cython sources included with 'include'
+ # pxd_file_loaded boolean Corresponding .pxd file has been processed
+ # cimported_modules [ModuleScope] Modules imported with cimport
+ # types_imported {PyrexType} Set of types for which import code generated
+ # has_import_star boolean Module contains import *
+ # cpp boolean Compiling a C++ file
+ # is_cython_builtin boolean Is this the Cython builtin scope (or a child scope)
+ # is_package boolean Is this a package module? (__init__)
+
+ is_module_scope = 1
+ has_import_star = 0
+ is_cython_builtin = 0
+ old_style_globals = 0
+
+ def __init__(self, name, parent_module, context):
+ from . import Builtin
+ self.parent_module = parent_module
+ outer_scope = Builtin.builtin_scope
+ Scope.__init__(self, name, outer_scope, parent_module)
+ if name == "__init__":
+ # Treat Spam/__init__.pyx specially, so that when Python loads
+ # Spam/__init__.so, initSpam() is defined.
+ self.module_name = parent_module.module_name
+ self.is_package = True
+ else:
+ self.module_name = name
+ self.is_package = False
+ self.module_name = EncodedString(self.module_name)
+ self.context = context
+ self.module_cname = Naming.module_cname
+ self.module_dict_cname = Naming.moddict_cname
+ self.method_table_cname = Naming.methtable_cname
+ self.doc = ""
+ self.doc_cname = Naming.moddoc_cname
+ self.utility_code_list = []
+ self.module_entries = {}
+ self.c_includes = {}
+ self.type_names = dict(outer_scope.type_names)
+ self.pxd_file_loaded = 0
+ self.cimported_modules = []
+ self.types_imported = set()
+ self.included_files = []
+ self.has_extern_class = 0
+ self.cached_builtins = []
+ self.undeclared_cached_builtins = []
+ self.namespace_cname = self.module_cname
+ self._cached_tuple_types = {}
+ for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
+ '__spec__', '__loader__', '__package__', '__cached__']:
+ self.declare_var(EncodedString(var_name), py_object_type, None)
+ self.process_include(Code.IncludeCode("Python.h", initial=True))
+
+ def qualifying_scope(self):
+ return self.parent_module
+
+ def global_scope(self):
+ return self
+
+ def lookup(self, name, language_level=None, str_is_str=None):
+ entry = self.lookup_here(name)
+ if entry is not None:
+ return entry
+
+ if language_level is None:
+ language_level = self.context.language_level if self.context is not None else 3
+ if str_is_str is None:
+ str_is_str = language_level == 2 or (
+ self.context is not None and Future.unicode_literals not in self.context.future_directives)
+
+ return self.outer_scope.lookup(name, language_level=language_level, str_is_str=str_is_str)
+
+ def declare_tuple_type(self, pos, components):
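+ # A ctuple such as (int, double) is represented as a generated C struct
+ # with fields f0, f1, ...; the struct entry is created once per module
+ # and cached by its component types.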
+ components = tuple(components)
+ try:
+ ttype = self._cached_tuple_types[components]
+ except KeyError:
+ ttype = self._cached_tuple_types[components] = PyrexTypes.c_tuple_type(components)
+ cname = ttype.cname
+ entry = self.lookup_here(cname)
+ if not entry:
+ scope = StructOrUnionScope(cname)
+ for ix, component in enumerate(components):
+ scope.declare_var(name="f%s" % ix, type=component, pos=pos)
+ struct_entry = self.declare_struct_or_union(
+ cname + '_struct', 'struct', scope, typedef_flag=True, pos=pos, cname=cname)
+ self.type_entries.remove(struct_entry)
+ ttype.struct_entry = struct_entry
+ entry = self.declare_type(cname, ttype, pos, cname)
+ ttype.entry = entry
+ return entry
+
+ def declare_builtin(self, name, pos):
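+ # Names unknown to the builtins module either become module globals
+ # (after 'import *') or are assumed builtin and looked up at runtime;
+ # known builtins can be cached in C globals (Options.cache_builtins).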
+ if not hasattr(builtins, name) \
+ and name not in Code.non_portable_builtins_map \
+ and name not in Code.uncachable_builtins:
+ if self.has_import_star:
+ entry = self.declare_var(name, py_object_type, pos)
+ return entry
+ else:
+ if Options.error_on_unknown_names:
+ error(pos, "undeclared name not builtin: %s" % name)
+ else:
+ warning(pos, "undeclared name not builtin: %s" % name, 2)
+ # unknown - assume it's builtin and look it up at runtime
+ entry = self.declare(name, None, py_object_type, pos, 'private')
+ entry.is_builtin = 1
+ return entry
+ if Options.cache_builtins:
+ for entry in self.cached_builtins:
+ if entry.name == name:
+ return entry
+ if name == 'globals' and not self.old_style_globals:
+ return self.outer_scope.lookup('__Pyx_Globals')
+ else:
+ entry = self.declare(None, None, py_object_type, pos, 'private')
+ if Options.cache_builtins and name not in Code.uncachable_builtins:
+ entry.is_builtin = 1
+ entry.is_const = 1 # cached
+ entry.name = name
+ entry.cname = Naming.builtin_prefix + name
+ self.cached_builtins.append(entry)
+ self.undeclared_cached_builtins.append(entry)
+ else:
+ entry.is_builtin = 1
+ entry.name = name
+ entry.qualified_name = self.builtin_scope().qualify_name(name)
+ return entry
+
+ def find_module(self, module_name, pos, relative_level=-1, need_pxd=1):
+ # Find a module in the import namespace, interpreting
+ # relative imports relative to this module's parent.
+ # Finds and parses the module's .pxd file if the module
+ # has not been referenced before.
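+ # relative_level semantics: 0 = absolute cimport only, N > 0 =
+ # explicit relative cimport ('from ..pkg cimport x' has level 2),
+ # and -1/None = try relative first, then fall back to absolute.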
+ relative_to = None
+ absolute_fallback = False
+ if relative_level is not None and relative_level > 0:
+ # explicit relative cimport
+ # going beyond the top level is reported as an error by the cimport node
+ relative_to = self
+ while relative_level > 0 and relative_to:
+ relative_to = relative_to.parent_module
+ relative_level -= 1
+ elif relative_level != 0:
+ # -1 or None: try relative cimport first, then absolute
+ relative_to = self.parent_module
+ absolute_fallback = True
+
+ module_scope = self.global_scope()
+ return module_scope.context.find_module(
+ module_name, relative_to=relative_to, pos=pos, absolute_fallback=absolute_fallback, need_pxd=need_pxd)
+
+ def find_submodule(self, name):
+ # Find and return scope for a submodule of this module,
+ # creating a new empty one if necessary. Doesn't parse .pxd.
+ if '.' in name:
+ name, submodule = name.split('.', 1)
+ else:
+ submodule = None
+ scope = self.lookup_submodule(name)
+ if not scope:
+ scope = ModuleScope(name, parent_module=self, context=self.context)
+ self.module_entries[name] = scope
+ if submodule:
+ scope = scope.find_submodule(submodule)
+ return scope
+
+ def lookup_submodule(self, name):
+ # Return scope for submodule of this module, or None.
+ if '.' in name:
+ name, submodule = name.split('.', 1)
+ else:
+ submodule = None
+ module = self.module_entries.get(name, None)
+ if submodule and module is not None:
+ module = module.lookup_submodule(submodule)
+ return module
+
+ def add_include_file(self, filename, verbatim_include=None, late=False):
+ """
+ Add `filename` as include file. Add `verbatim_include` as
+ verbatim text in the C file.
+ Both `filename` and `verbatim_include` can be `None` or empty.
+ """
+ inc = Code.IncludeCode(filename, verbatim_include, late=late)
+ self.process_include(inc)
+
+ def process_include(self, inc):
+ """
+ Add `inc`, which is an instance of `IncludeCode`, to this
+ `ModuleScope`. This either adds a new element to the
+ `c_includes` dict or it updates an existing entry.
+
+ In detail: the values of the dict `self.c_includes` are
+ instances of `IncludeCode` containing the code to be put in the
+ generated C file. The keys of the dict are needed to ensure
+ uniqueness in two ways: if an include file is specified in
+ multiple "cdef extern" blocks, only one `#include` statement is
+ generated. Second, the same include might occur multiple times
+ if we find it through multiple "cimport" paths. So we use the
+ generated code (of the form `#include "header.h"`) as dict key.
+
+ If verbatim code does not belong to any include file (i.e. it
+ was put in a `cdef extern from *` block), then we use a unique
+ dict key: namely, the `sortkey()`.
+
+ One `IncludeCode` object can contain multiple pieces of C code:
+ one optional "main piece" for the include file and several other
+ pieces for the verbatim code. The `IncludeCode.dict_update`
+ method merges the pieces of two different `IncludeCode` objects
+ if needed.
+ """
+ key = inc.mainpiece()
+ if key is None:
+ key = inc.sortkey()
+ inc.dict_update(self.c_includes, key)
+ inc = self.c_includes[key]
+
+ def add_imported_module(self, scope):
+ if scope not in self.cimported_modules:
+ for inc in scope.c_includes.values():
+ self.process_include(inc)
+ self.cimported_modules.append(scope)
+ for m in scope.cimported_modules:
+ self.add_imported_module(m)
+
+ def add_imported_entry(self, name, entry, pos):
+ if entry.is_pyglobal:
+ # Allow cimports to follow imports.
+ entry.is_variable = True
+ if entry not in self.entries:
+ self.entries[name] = entry
+ else:
+ warning(pos, "'%s' redeclared " % name, 0)
+
+ def declare_module(self, name, scope, pos):
+ # Declare a cimported module. This is represented as a
+ # Python module-level variable entry with a module
+ # scope attached to it. Reports an error and returns
+ # None if previously declared as something else.
+ entry = self.lookup_here(name)
+ if entry:
+ if entry.is_pyglobal and entry.as_module is scope:
+ return entry # Already declared as the same module
+ if not (entry.is_pyglobal and not entry.as_module):
+ # SAGE -- I put this here so Pyrex
+ # cimports work across directories.
+ # Currently it tries to multiply define
+ # every module appearing in an import list.
+ # It shouldn't be an error for a module
+ # name to appear again, and indeed the generated
+ # code compiles fine.
+ return entry
+ else:
+ entry = self.declare_var(name, py_object_type, pos)
+ entry.is_variable = 0
+ entry.as_module = scope
+ self.add_imported_module(scope)
+ return entry
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0):
+ # Add an entry for a global variable. If it is a Python
+ # object type, and not declared with cdef, it will live
+ # in the module dictionary, otherwise it will be a C
+ # global variable.
+ if visibility not in ('private', 'public', 'extern'):
+ error(pos, "Module-level variable cannot be declared %s" % visibility)
+ if not is_cdef:
+ if type is unspecified_type:
+ type = py_object_type
+ if not (type.is_pyobject and not type.is_extension_type):
+ raise InternalError(
+ "Non-cdef global variable is not a generic Python object")
+
+ if not cname:
+ defining = not in_pxd
+ if visibility == 'extern' or (visibility == 'public' and defining):
+ cname = name
+ else:
+ cname = self.mangle(Naming.var_prefix, name)
+
+ entry = self.lookup_here(name)
+ if entry and entry.defined_in_pxd:
+ #if visibility != 'private' and visibility != entry.visibility:
+ # warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
+ if not entry.type.same_as(type):
+ if visibility == 'extern' and entry.visibility == 'extern':
+ warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
+ entry.type = type
+ #else:
+ # error(pos, "Variable '%s' type does not match previous declaration" % name)
+ if entry.visibility != "private":
+ mangled_cname = self.mangle(Naming.var_prefix, name)
+ if entry.cname == mangled_cname:
+ cname = name
+ entry.cname = name
+ if not entry.is_implemented:
+ entry.is_implemented = True
+ return entry
+
+ entry = Scope.declare_var(self, name, type, pos,
+ cname=cname, visibility=visibility,
+ api=api, in_pxd=in_pxd, is_cdef=is_cdef)
+ if is_cdef:
+ entry.is_cglobal = 1
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
+ self.var_entries.append(entry)
+ else:
+ entry.is_pyglobal = 1
+ if Options.cimport_from_pyx:
+ entry.used = 1
+ return entry
+
+ def declare_cfunction(self, name, type, pos,
+ cname=None, visibility='private', api=0, in_pxd=0,
+ defining=0, modifiers=(), utility_code=None, overridable=False):
+ if not defining and 'inline' in modifiers:
+ # TODO(github/1736): Make this an error.
+ warning(pos, "Declarations should not be declared inline.", 1)
+ # Add an entry for a C function.
+ if not cname:
+ if visibility == 'extern' or (visibility == 'public' and defining):
+ cname = name
+ else:
+ cname = self.mangle(Naming.func_prefix, name)
+ if visibility == 'extern' and type.optional_arg_count:
+ error(pos, "Extern functions cannot have default arguments values.")
+ entry = self.lookup_here(name)
+ if entry and entry.defined_in_pxd:
+ if entry.visibility != "private":
+ mangled_cname = self.mangle(Naming.var_prefix, name)
+ if entry.cname == mangled_cname:
+ cname = name
+ entry.cname = cname
+ entry.func_cname = cname
+ entry = Scope.declare_cfunction(
+ self, name, type, pos,
+ cname=cname, visibility=visibility, api=api, in_pxd=in_pxd,
+ defining=defining, modifiers=modifiers, utility_code=utility_code,
+ overridable=overridable)
+ return entry
+
+ def declare_global(self, name, pos):
+ entry = self.lookup_here(name)
+ if not entry:
+ self.declare_var(name, py_object_type, pos)
+
+ def use_utility_code(self, new_code):
+ if new_code is not None:
+ self.utility_code_list.append(new_code)
+
+ def use_entry_utility_code(self, entry):
+ if entry is None:
+ return
+ if entry.utility_code:
+ self.utility_code_list.append(entry.utility_code)
+ if entry.utility_code_definition:
+ self.utility_code_list.append(entry.utility_code_definition)
+
+ def declare_c_class(self, name, pos, defining=0, implementing=0,
+ module_name=None, base_type=None, objstruct_cname=None,
+ typeobj_cname=None, typeptr_cname=None, visibility='private',
+ typedef_flag=0, api=0, check_size=None,
+ buffer_defaults=None, shadow=0):
+ # If this is a non-extern typedef class, expose the typedef, but use
+ # the non-typedef struct internally to avoid needing forward
+ # declarations for anonymous structs.
+ if typedef_flag and visibility != 'extern':
+ if not (visibility == 'public' or api):
+ warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
+ objtypedef_cname = objstruct_cname
+ typedef_flag = 0
+ else:
+ objtypedef_cname = None
+ #
+ # Look for previous declaration as a type
+ #
+ entry = self.lookup_here(name)
+ if entry and not shadow:
+ type = entry.type
+ if not (entry.is_type and type.is_extension_type):
+ entry = None # Will cause redeclaration and produce an error
+ else:
+ scope = type.scope
+ if typedef_flag and (not scope or scope.defined):
+ self.check_previous_typedef_flag(entry, typedef_flag, pos)
+ if (scope and scope.defined) or (base_type and type.base_type):
+ if base_type and base_type is not type.base_type:
+ error(pos, "Base type does not match previous declaration")
+ if base_type and not type.base_type:
+ type.base_type = base_type
+ #
+ # Make a new entry if needed
+ #
+ if not entry or shadow:
+ type = PyrexTypes.PyExtensionType(
+ name, typedef_flag, base_type, visibility == 'extern', check_size=check_size)
+ type.pos = pos
+ type.buffer_defaults = buffer_defaults
+ if objtypedef_cname is not None:
+ type.objtypedef_cname = objtypedef_cname
+ if visibility == 'extern':
+ type.module_name = module_name
+ else:
+ type.module_name = self.qualified_name
+ if typeptr_cname:
+ type.typeptr_cname = typeptr_cname
+ else:
+ type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
+ entry = self.declare_type(name, type, pos, visibility = visibility,
+ defining = 0, shadow = shadow)
+ entry.is_cclass = True
+ if objstruct_cname:
+ type.objstruct_cname = objstruct_cname
+ elif not entry.in_cinclude:
+ type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
+ else:
+ error(entry.pos,
+ "Object name required for 'public' or 'extern' C class")
+ self.attach_var_entry_to_c_class(entry)
+ self.c_class_entries.append(entry)
+ #
+ # Check for re-definition and create scope if needed
+ #
+ if not type.scope:
+ if defining or implementing:
+ scope = CClassScope(name = name, outer_scope = self,
+ visibility = visibility)
+ scope.directives = self.directives.copy()
+ if base_type and base_type.scope:
+ scope.declare_inherited_c_attributes(base_type.scope)
+ type.set_scope(scope)
+ self.type_entries.append(entry)
+ else:
+ if defining and type.scope.defined:
+ error(pos, "C class '%s' already defined" % name)
+ elif implementing and type.scope.implemented:
+ error(pos, "C class '%s' already implemented" % name)
+ #
+ # Fill in options, checking for compatibility with any previous declaration
+ #
+ if defining:
+ entry.defined_in_pxd = 1
+ if implementing: # So that filenames in runtime exceptions refer to
+ entry.pos = pos # the .pyx file and not the .pxd file
+ if visibility != 'private' and entry.visibility != visibility:
+ error(pos, "Class '%s' previously declared as '%s'"
+ % (name, entry.visibility))
+ if api:
+ entry.api = 1
+ if objstruct_cname:
+ if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
+ error(pos, "Object struct name differs from previous declaration")
+ type.objstruct_cname = objstruct_cname
+ if typeobj_cname:
+ if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
+ error(pos, "Type object name differs from previous declaration")
+ type.typeobj_cname = typeobj_cname
+
+ if self.directives.get('final'):
+ entry.type.is_final_type = True
+
+ # cdef classes are always exported, but we still set 'used' explicitly to
+ # distinguish them from unused extension classes in Cython utility code
+ entry.used = True
+
+ #
+ # Return new or existing entry
+ #
+ return entry
+
+ def allocate_vtable_names(self, entry):
+ # If extension type has a vtable, allocate vtable struct and
+ # slot names for it.
+ type = entry.type
+ if type.base_type and type.base_type.vtabslot_cname:
+ #print "...allocating vtabslot_cname because base type has one" ###
+ type.vtabslot_cname = "%s.%s" % (
+ Naming.obj_base_cname, type.base_type.vtabslot_cname)
+ elif type.scope and type.scope.cfunc_entries:
+ # one special case here: when inheriting from builtin
+ # types, the methods may also be built-in, in which
+ # case they won't need a vtable
+ entry_count = len(type.scope.cfunc_entries)
+ base_type = type.base_type
+ while base_type:
+ # FIXME: this will break if we ever get non-inherited C methods
+ if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
+ break
+ if base_type.is_builtin_type:
+ # builtin base type defines all methods => no vtable needed
+ return
+ base_type = base_type.base_type
+ #print "...allocating vtabslot_cname because there are C methods" ###
+ type.vtabslot_cname = Naming.vtabslot_cname
+ if type.vtabslot_cname:
+ #print "...allocating other vtable related cnames" ###
+ type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
+ type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
+
+ def check_c_classes_pxd(self):
+ # Performs post-analysis checking and finishing up of extension types
+ # being implemented in this module. This is called only for the .pxd.
+ #
+ # Checks all extension types declared in this scope to
+ # make sure that:
+ #
+ # * The extension type is fully declared
+ #
+ # Also allocates a name for the vtable if needed.
+ #
+ for entry in self.c_class_entries:
+ # Check defined
+ if not entry.type.scope:
+ error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
+
+ def check_c_class(self, entry):
+ type = entry.type
+ name = entry.name
+ visibility = entry.visibility
+ # Check defined
+ if not type.scope:
+ error(entry.pos, "C class '%s' is declared but not defined" % name)
+ # Generate typeobj_cname
+ if visibility != 'extern' and not type.typeobj_cname:
+ type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
+ ## Generate typeptr_cname
+ #type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
+ # Check C methods defined
+ if type.scope:
+ for method_entry in type.scope.cfunc_entries:
+ if not method_entry.is_inherited and not method_entry.func_cname:
+ error(method_entry.pos, "C method '%s' is declared but not defined" %
+ method_entry.name)
+ # Allocate vtable name if necessary
+ if type.vtabslot_cname:
+ #print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
+ type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
+
+ def check_c_classes(self):
+ # Performs post-analysis checking and finishing up of extension types
+ # being implemented in this module. This is called only for the main
+ # .pyx file scope, not for cimported .pxd scopes.
+ #
+ # Checks all extension types declared in this scope to
+ # make sure that:
+ #
+ # * The extension type is implemented
+ # * All required object and type names have been specified or generated
+ # * All non-inherited C methods are implemented
+ #
+ # Also allocates a name for the vtable if needed.
+ #
+ debug_check_c_classes = 0
+ if debug_check_c_classes:
+ print("Scope.check_c_classes: checking scope " + self.qualified_name)
+ for entry in self.c_class_entries:
+ if debug_check_c_classes:
+ print("...entry %s %s" % (entry.name, entry))
+ print("......type = ", entry.type)
+ print("......visibility = ", entry.visibility)
+ self.check_c_class(entry)
+
+ def check_c_functions(self):
+ # Performs post-analysis checking making sure all
+ # defined c functions are actually implemented.
+ for name, entry in self.entries.items():
+ if entry.is_cfunction:
+ if (entry.defined_in_pxd
+ and entry.scope is self
+ and entry.visibility != 'extern'
+ and not entry.in_cinclude
+ and not entry.is_implemented):
+ error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
+
+ def attach_var_entry_to_c_class(self, entry):
+ # The name of an extension class has to serve as both a type
+ # name and a variable name holding the type object. It is
+ # represented in the symbol table by a type entry with a
+ # variable entry attached to it. For the variable entry,
+ # we use a read-only C global variable whose name is an
+ # expression that refers to the type object.
+ from . import Builtin
+ var_entry = Entry(name = entry.name,
+ type = Builtin.type_type,
+ pos = entry.pos,
+ cname = entry.type.typeptr_cname)
+ var_entry.qualified_name = entry.qualified_name
+ var_entry.is_variable = 1
+ var_entry.is_cglobal = 1
+ var_entry.is_readonly = 1
+ var_entry.scope = entry.scope
+ entry.as_variable = var_entry
+
+ def is_cpp(self):
+ return self.cpp
+
+ def infer_types(self):
+ from .TypeInference import PyObjectTypeInferer
+ PyObjectTypeInferer().infer_types(self)
+
+
+class LocalScope(Scope):
+
+ # Does the function have a 'with gil:' block?
+ has_with_gil_block = False
+
+ # Transient attribute, used for symbol table variable declarations
+ _in_with_gil_block = False
+
+ def __init__(self, name, outer_scope, parent_scope = None):
+ if parent_scope is None:
+ parent_scope = outer_scope
+ Scope.__init__(self, name, outer_scope, parent_scope)
+
+ def mangle(self, prefix, name):
+ return prefix + name
+
+ def declare_arg(self, name, type, pos):
+ # Add an entry for an argument of a function.
+ cname = self.mangle(Naming.var_prefix, name)
+ entry = self.declare(name, cname, type, pos, 'private')
+ entry.is_variable = 1
+ if type.is_pyobject:
+ entry.init = "0"
+ entry.is_arg = 1
+ #entry.borrowed = 1 # Not using borrowed arg refs for now
+ self.arg_entries.append(entry)
+ return entry
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0):
+ # Add an entry for a local variable.
+ if visibility in ('public', 'readonly'):
+ error(pos, "Local variable cannot be declared %s" % visibility)
+ entry = Scope.declare_var(self, name, type, pos,
+ cname=cname, visibility=visibility,
+ api=api, in_pxd=in_pxd, is_cdef=is_cdef)
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
+ entry.is_local = 1
+
+ entry.in_with_gil_block = self._in_with_gil_block
+ self.var_entries.append(entry)
+ return entry
+
+ def declare_global(self, name, pos):
+ # Pull entry from global scope into local scope.
+ if self.lookup_here(name):
+ warning(pos, "'%s' redeclared ", 0)
+ else:
+ entry = self.global_scope().lookup_target(name)
+ self.entries[name] = entry
+
+ def declare_nonlocal(self, name, pos):
+ # Pull entry from outer scope into local scope
+ orig_entry = self.lookup_here(name)
+ if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
+ error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
+ else:
+ entry = self.lookup(name)
+ if entry is None or not entry.from_closure:
+ error(pos, "no binding for nonlocal '%s' found" % name)
+
+ def lookup(self, name):
+ # Look up name in this scope or an enclosing one.
+ # Return None if not found.
+ entry = Scope.lookup(self, name)
+ if entry is not None:
+ entry_scope = entry.scope
+ while entry_scope.is_genexpr_scope:
+ entry_scope = entry_scope.outer_scope
+ if entry_scope is not self and entry_scope.is_closure_scope:
+ if hasattr(entry.scope, "scope_class"):
+ raise InternalError("lookup() after scope class created.")
+ # The C name fragment for a closure variable differs between the
+ # defining scope and the inner scope, so we make a new entry
+ entry.in_closure = True
+ inner_entry = InnerEntry(entry, self)
+ inner_entry.is_variable = True
+ self.entries[name] = inner_entry
+ return inner_entry
+ return entry
+
+ def mangle_closure_cnames(self, outer_scope_cname):
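+ # Rewrite cnames so closure variables go through the scope struct:
+ # a variable captured from an outer function is accessed as
+ # '<outer_scope_cname>-><cname>', while the defining scope stores it
+ # under Naming.cur_scope_cname (typically '__pyx_cur_scope-><cname>').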
+ for scope in self.iter_local_scopes():
+ for entry in scope.entries.values():
+ if entry.from_closure:
+ cname = entry.outer_entry.cname
+ if self.is_passthrough:
+ entry.cname = cname
+ else:
+ if cname.startswith(Naming.cur_scope_cname):
+ cname = cname[len(Naming.cur_scope_cname)+2:]
+ entry.cname = "%s->%s" % (outer_scope_cname, cname)
+ elif entry.in_closure:
+ entry.original_cname = entry.cname
+ entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
+
+
+class GeneratorExpressionScope(Scope):
+ """Scope for generator expressions and comprehensions. As opposed
+ to generators, these can be easily inlined in some cases, so all
+ we really need is a scope that holds the loop variable(s).
+ """
+ is_genexpr_scope = True
+
+ def __init__(self, outer_scope):
+ parent_scope = outer_scope
+ # TODO: also ignore class scopes?
+ while parent_scope.is_genexpr_scope:
+ parent_scope = parent_scope.parent_scope
+ name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
+ Scope.__init__(self, name, outer_scope, parent_scope)
+ self.directives = outer_scope.directives
+ self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
+
+ # Class/ExtType scopes are filled at class creation time, i.e. from the
+ # module init function or surrounding function.
+ while outer_scope.is_genexpr_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
+ outer_scope = outer_scope.outer_scope
+ self.var_entries = outer_scope.var_entries # keep declarations outside
+ outer_scope.subscopes.add(self)
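+ # Sharing var_entries above is what lets simple generator expressions
+ # be inlined: their loop variables are emitted by the enclosing
+ # function or module scope rather than by this subscope.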
+
+ def mangle(self, prefix, name):
+ return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = True):
+ if type is unspecified_type:
+ # if the outer scope defines a type for this variable, inherit it
+ outer_entry = self.outer_scope.lookup(name)
+ if outer_entry and outer_entry.is_variable:
+ type = outer_entry.type # may still be 'unspecified_type' !
+ # the parent scope needs to generate code for the variable, but
+ # this scope must hold its name exclusively
+ cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_variable = True
+ if self.parent_scope.is_module_scope:
+ entry.is_cglobal = True
+ else:
+ entry.is_local = True
+ entry.in_subscope = True
+ self.var_entries.append(entry)
+ self.entries[name] = entry
+ return entry
+
+ def declare_pyfunction(self, name, pos, allow_redefine=False):
+ return self.outer_scope.declare_pyfunction(
+ name, pos, allow_redefine)
+
+ def declare_lambda_function(self, func_cname, pos):
+ return self.outer_scope.declare_lambda_function(func_cname, pos)
+
+ def add_lambda_def(self, def_node):
+ return self.outer_scope.add_lambda_def(def_node)
+
+
+class ClosureScope(LocalScope):
+
+ is_closure_scope = True
+
+ def __init__(self, name, scope_name, outer_scope, parent_scope=None):
+ LocalScope.__init__(self, name, outer_scope, parent_scope)
+ self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)
+
+# def mangle_closure_cnames(self, scope_var):
+# for entry in self.entries.values() + self.temp_entries:
+# entry.in_closure = 1
+# LocalScope.mangle_closure_cnames(self, scope_var)
+
+# def mangle(self, prefix, name):
+# return "%s->%s" % (self.cur_scope_cname, name)
+# return "%s->%s" % (self.closure_cname, name)
+
+ def declare_pyfunction(self, name, pos, allow_redefine=False):
+ return LocalScope.declare_pyfunction(self, name, pos, allow_redefine, visibility='private')
+
+
+class StructOrUnionScope(Scope):
+ # Namespace of a C struct or union.
+
+ def __init__(self, name="?"):
+ Scope.__init__(self, name, None, None)
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0,
+ allow_pyobject=False, allow_memoryview=False):
+ # Add an entry for an attribute.
+ if not cname:
+ cname = name
+ if visibility == 'private':
+ cname = c_safe_identifier(cname)
+ if type.is_cfunction:
+ type = PyrexTypes.CPtrType(type)
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_variable = 1
+ self.var_entries.append(entry)
+ if type.is_pyobject and not allow_pyobject:
+ error(pos, "C struct/union member cannot be a Python object")
+ elif type.is_memoryviewslice and not allow_memoryview:
+ # Memory views wrap their buffer owner as a Python object.
+ error(pos, "C struct/union member cannot be a memory view")
+ if visibility != 'private':
+ error(pos, "C struct/union member cannot be declared %s" % visibility)
+ return entry
+
+ def declare_cfunction(self, name, type, pos,
+ cname=None, visibility='private', api=0, in_pxd=0,
+ defining=0, modifiers=(), overridable=False): # currently no utility code ...
+ if overridable:
+ error(pos, "C struct/union member cannot be declared 'cpdef'")
+ return self.declare_var(name, type, pos,
+ cname=cname, visibility=visibility)
+
+
+class ClassScope(Scope):
+ # Abstract base class for namespace of
+ # Python class or extension type.
+ #
+ # class_name string Python name of the class
+ # scope_prefix string Additional prefix for names
+ # declared in the class
+ # doc string or None Doc string
+
+ def __init__(self, name, outer_scope):
+ Scope.__init__(self, name, outer_scope, outer_scope)
+ self.class_name = name
+ self.doc = None
+
+ def lookup(self, name):
+ entry = Scope.lookup(self, name)
+ if entry:
+ return entry
+ if name == "classmethod":
+            # We don't want to use the builtin classmethod here because it won't do
+            # the right thing in this scope (the class members are not functions yet).
+            # We also don't want to add a cfunction to this scope because that would
+            # mess with the type definition, so we just return the right entry.
+ entry = Entry(
+ "classmethod",
+ "__Pyx_Method_ClassMethod",
+ PyrexTypes.CFuncType(
+ py_object_type,
+ [PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
+ entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c")
+ self.use_entry_utility_code(entry)
+ entry.is_cfunction = 1
+ return entry
+
+
+class PyClassScope(ClassScope):
+ # Namespace of a Python class.
+ #
+ # class_obj_cname string C variable holding class object
+
+ is_py_class_scope = 1
+
+ def mangle_class_private_name(self, name):
+ return self.mangle_special_name(name)
+
+ def mangle_special_name(self, name):
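+        # Standard CPython private-name mangling, e.g. '__attr' in
+        # 'class _Foo' becomes '_Foo__attr'; dunder names such as
+        # '__init__' and public names pass through unchanged.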
+ if name and name.startswith('__') and not name.endswith('__'):
+ name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
+ return name
+
+ def lookup_here(self, name):
+ name = self.mangle_special_name(name)
+ return ClassScope.lookup_here(self, name)
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0):
+ name = self.mangle_special_name(name)
+ if type is unspecified_type:
+ type = py_object_type
+ # Add an entry for a class attribute.
+ entry = Scope.declare_var(self, name, type, pos,
+ cname=cname, visibility=visibility,
+ api=api, in_pxd=in_pxd, is_cdef=is_cdef)
+ entry.is_pyglobal = 1
+ entry.is_pyclass_attr = 1
+ return entry
+
+ def declare_nonlocal(self, name, pos):
+ # Pull entry from outer scope into local scope
+ orig_entry = self.lookup_here(name)
+ if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
+ error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
+ else:
+ entry = self.lookup(name)
+ if entry is None:
+ error(pos, "no binding for nonlocal '%s' found" % name)
+ else:
+ # FIXME: this works, but it's unclear if it's the
+ # right thing to do
+ self.entries[name] = entry
+
+ def declare_global(self, name, pos):
+ # Pull entry from global scope into local scope.
+ if self.lookup_here(name):
+            warning(pos, "'%s' redeclared " % name, 0)
+ else:
+ entry = self.global_scope().lookup_target(name)
+ self.entries[name] = entry
+
+ def add_default_value(self, type):
+ return self.outer_scope.add_default_value(type)
+
+
+class CClassScope(ClassScope):
+ # Namespace of an extension type.
+ #
+ # parent_type CClassType
+ # #typeobj_cname string or None
+ # #objstruct_cname string
+ # method_table_cname string
+ # getset_table_cname string
+ # has_pyobject_attrs boolean Any PyObject attributes?
+ # has_memoryview_attrs boolean Any memory view attributes?
+ # has_cpp_class_attrs boolean Any (non-pointer) C++ attributes?
+ # has_cyclic_pyobject_attrs boolean Any PyObject attributes that may need GC?
+ # property_entries [Entry]
+ # defined boolean Defined in .pxd file
+ # implemented boolean Defined in .pyx file
+ # inherited_var_entries [Entry] Adapted var entries from base class
+
+ is_c_class_scope = 1
+ is_closure_class_scope = False
+
+ has_pyobject_attrs = False
+ has_memoryview_attrs = False
+ has_cpp_class_attrs = False
+ has_cyclic_pyobject_attrs = False
+ defined = False
+ implemented = False
+
+ def __init__(self, name, outer_scope, visibility):
+ ClassScope.__init__(self, name, outer_scope)
+ if visibility != 'extern':
+ self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
+ self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
+ self.property_entries = []
+ self.inherited_var_entries = []
+
+ def needs_gc(self):
+ # If the type or any of its base types have Python-valued
+ # C attributes, then it needs to participate in GC.
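+        # Illustration (hypothetical user code):
+        #     cdef class Node:
+        #         cdef object payload
+        # 'Node' has a Python-valued C attribute and therefore takes part
+        # in GC, unless the 'no_gc' directive suppresses it.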
+ if self.has_cyclic_pyobject_attrs and not self.directives.get('no_gc', False):
+ return True
+ base_type = self.parent_type.base_type
+ if base_type and base_type.scope is not None:
+ return base_type.scope.needs_gc()
+ elif self.parent_type.is_builtin_type:
+ return not self.parent_type.is_gc_simple
+ return False
+
+ def needs_tp_clear(self):
+ """
+ Do we need to generate an implementation for the tp_clear slot? Can
+ be disabled to keep references for the __dealloc__ cleanup function.
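+
+        User-level sketch (illustrative only)::
+
+            @cython.no_gc_clear
+            cdef class KeepsRefs:
+                cdef object handle    # remains valid inside __dealloc__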
+ """
+ return self.needs_gc() and not self.directives.get('no_gc_clear', False)
+
+ def get_refcounted_entries(self, include_weakref=False,
+ include_gc_simple=True):
+ py_attrs = []
+ py_buffers = []
+ memoryview_slices = []
+
+ for entry in self.var_entries:
+ if entry.type.is_pyobject:
+ if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"):
+ if include_gc_simple or not entry.type.is_gc_simple:
+ py_attrs.append(entry)
+ elif entry.type == PyrexTypes.c_py_buffer_type:
+ py_buffers.append(entry)
+ elif entry.type.is_memoryviewslice:
+ memoryview_slices.append(entry)
+
+ have_entries = py_attrs or py_buffers or memoryview_slices
+ return have_entries, (py_attrs, py_buffers, memoryview_slices)
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'private',
+ api = 0, in_pxd = 0, is_cdef = 0):
+ if is_cdef:
+ # Add an entry for an attribute.
+ if self.defined:
+ error(pos,
+ "C attributes cannot be added in implementation part of"
+ " extension type defined in a pxd")
+ if not self.is_closure_class_scope and get_special_method_signature(name):
+ error(pos,
+ "The name '%s' is reserved for a special method."
+ % name)
+ if not cname:
+ cname = name
+ if visibility == 'private':
+ cname = c_safe_identifier(cname)
+ if type.is_cpp_class and visibility != 'extern':
+ type.check_nullary_constructor(pos)
+ self.use_utility_code(Code.UtilityCode("#include <new>"))
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_variable = 1
+ self.var_entries.append(entry)
+ if type.is_memoryviewslice:
+ self.has_memoryview_attrs = True
+ elif type.is_cpp_class:
+ self.has_cpp_class_attrs = True
+ elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'):
+ self.has_pyobject_attrs = True
+ if (not type.is_builtin_type
+ or not type.scope or type.scope.needs_gc()):
+ self.has_cyclic_pyobject_attrs = True
+ if visibility not in ('private', 'public', 'readonly'):
+ error(pos,
+ "Attribute of extension type cannot be declared %s" % visibility)
+ if visibility in ('public', 'readonly'):
+                # If the field is an external typedef, we cannot be sure about the type,
+                # so we do the conversion ourselves rather than rely on the CPython
+                # mechanism (through a property; created in AnalyseDeclarationsTransform).
+ entry.needs_property = True
+ if not self.is_closure_class_scope and name == "__weakref__":
+ error(pos, "Special attribute __weakref__ cannot be exposed to Python")
+ if not (type.is_pyobject or type.can_coerce_to_pyobject(self)):
+ # we're not testing for coercion *from* Python here - that would fail later
+ error(pos, "C attribute of type '%s' cannot be accessed from Python" % type)
+ else:
+ entry.needs_property = False
+ return entry
+ else:
+ if type is unspecified_type:
+ type = py_object_type
+ # Add an entry for a class attribute.
+ entry = Scope.declare_var(self, name, type, pos,
+ cname=cname, visibility=visibility,
+ api=api, in_pxd=in_pxd, is_cdef=is_cdef)
+ entry.is_member = 1
+ entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
+ # I keep it in for now. is_member should be enough
+ # later on
+ self.namespace_cname = "(PyObject *)%s" % self.parent_type.typeptr_cname
+ return entry
+
+ def declare_pyfunction(self, name, pos, allow_redefine=False):
+ # Add an entry for a method.
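+        # ('richcmp_special_methods' names the rich comparison specials,
+        # i.e. __eq__, __ne__, __lt__, __gt__, __le__ and __ge__.)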
+ if name in richcmp_special_methods:
+ if self.lookup_here('__richcmp__'):
+                error(pos, "Cannot define both %s and __richcmp__" % name)
+ elif name == '__richcmp__':
+ for n in richcmp_special_methods:
+ if self.lookup_here(n):
+                    error(pos, "Cannot define both %s and __richcmp__" % n)
+ if name == "__new__":
+ error(pos, "__new__ method of extension type will change semantics "
+ "in a future version of Pyrex and Cython. Use __cinit__ instead.")
+ entry = self.declare_var(name, py_object_type, pos,
+ visibility='extern')
+ special_sig = get_special_method_signature(name)
+ if special_sig:
+ # Special methods get put in the method table with a particular
+ # signature declared in advance.
+ entry.signature = special_sig
+ entry.is_special = 1
+ else:
+ entry.signature = pymethod_signature
+ entry.is_special = 0
+
+ self.pyfunc_entries.append(entry)
+ return entry
+
+ def lookup_here(self, name):
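+        # In a cdef class scope, a lookup of '__new__' is redirected to
+        # '__cinit__'; closure class scopes are exempt, as '__new__' has
+        # no special meaning there.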
+ if not self.is_closure_class_scope and name == "__new__":
+ name = EncodedString("__cinit__")
+ entry = ClassScope.lookup_here(self, name)
+ if entry and entry.is_builtin_cmethod:
+ if not self.parent_type.is_builtin_type:
+ # For subtypes of builtin types, we can only return
+                # optimised C methods if the type is final.
+ # Otherwise, subtypes may choose to override the
+ # method, but the optimisation would prevent the
+ # subtype method from being called.
+ if not self.parent_type.is_final_type:
+ return None
+ return entry
+
+ def declare_cfunction(self, name, type, pos,
+ cname=None, visibility='private', api=0, in_pxd=0,
+ defining=0, modifiers=(), utility_code=None, overridable=False):
+ if get_special_method_signature(name) and not self.parent_type.is_builtin_type:
+ error(pos, "Special methods must be declared with 'def', not 'cdef'")
+ args = type.args
+ if not type.is_static_method:
+ if not args:
+ error(pos, "C method has no self argument")
+ elif not self.parent_type.assignable_from(args[0].type):
+ error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" %
+ (args[0].type, name, self.parent_type))
+ entry = self.lookup_here(name)
+ if cname is None:
+ cname = c_safe_identifier(name)
+ if entry:
+ if not entry.is_cfunction:
+ warning(pos, "'%s' redeclared " % name, 0)
+ else:
+ if defining and entry.func_cname:
+ error(pos, "'%s' already defined" % name)
+ #print "CClassScope.declare_cfunction: checking signature" ###
+ if entry.is_final_cmethod and entry.is_inherited:
+ error(pos, "Overriding final methods is not allowed")
+ elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
+ elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
+ if (self.defined and not in_pxd
+ and not type.same_c_signature_as_resolved_type(entry.type, as_cmethod = 1, as_pxd_definition = 1)):
+ # TODO(robertwb): Make this an error.
+ warning(pos,
+ "Compatible but non-identical C method '%s' not redeclared "
+ "in definition part of extension type '%s'. "
+ "This may cause incorrect vtables to be generated." % (
+ name, self.class_name), 2)
+ warning(entry.pos, "Previous declaration is here", 2)
+ entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
+ else:
+ error(pos, "Signature not compatible with previous declaration")
+ error(entry.pos, "Previous declaration is here")
+ else:
+ if self.defined:
+ error(pos,
+ "C method '%s' not previously declared in definition part of"
+ " extension type '%s'" % (name, self.class_name))
+ entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
+ if defining:
+ entry.func_cname = self.mangle(Naming.func_prefix, name)
+ entry.utility_code = utility_code
+ type.entry = entry
+
+ if u'inline' in modifiers:
+ entry.is_inline_cmethod = True
+
+ if (self.parent_type.is_final_type or entry.is_inline_cmethod or
+ self.directives.get('final')):
+ entry.is_final_cmethod = True
+ entry.final_func_cname = entry.func_cname
+
+ return entry
+
+ def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
+ # Add a cfunction entry without giving it a func_cname.
+ prev_entry = self.lookup_here(name)
+ entry = ClassScope.add_cfunction(self, name, type, pos, cname,
+ visibility, modifiers, inherited=inherited)
+ entry.is_cmethod = 1
+ entry.prev_entry = prev_entry
+ return entry
+
+ def declare_builtin_cfunction(self, name, type, cname, utility_code = None):
+ # overridden methods of builtin types still have their Python
+ # equivalent that must be accessible to support bound methods
+ name = EncodedString(name)
+ entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
+ utility_code=utility_code)
+ var_entry = Entry(name, name, py_object_type)
+ var_entry.qualified_name = name
+ var_entry.is_variable = 1
+ var_entry.is_builtin = 1
+ var_entry.utility_code = utility_code
+ var_entry.scope = entry.scope
+ entry.as_variable = var_entry
+ return entry
+
+ def declare_property(self, name, doc, pos):
+ entry = self.lookup_here(name)
+ if entry is None:
+ entry = self.declare(name, name, py_object_type, pos, 'private')
+ entry.is_property = 1
+ entry.doc = doc
+ entry.scope = PropertyScope(name,
+ outer_scope = self.global_scope(), parent_scope = self)
+ entry.scope.parent_type = self.parent_type
+ self.property_entries.append(entry)
+ return entry
+
+ def declare_inherited_c_attributes(self, base_scope):
+ # Declare entries for all the C attributes of an
+ # inherited type, with cnames modified appropriately
+ # to work with this type.
+ def adapt(cname):
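+            # Note: the 'cname' argument is effectively ignored; the
+            # 'base_entry' closure variable of the calling loop supplies
+            # the cname embedded in the access path.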
+ return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
+
+ entries = base_scope.inherited_var_entries + base_scope.var_entries
+ for base_entry in entries:
+ entry = self.declare(
+ base_entry.name, adapt(base_entry.cname),
+ base_entry.type, None, 'private')
+ entry.is_variable = 1
+ self.inherited_var_entries.append(entry)
+
+        # If the class is defined in a pxd, the specialized entries have not been added.
+        # Ensure now that the parent (base) scope has them.
+        # Iterate over a copy, as get_all_specialized_function_types() will mutate the list.
+ for base_entry in base_scope.cfunc_entries[:]:
+ if base_entry.type.is_fused:
+ base_entry.type.get_all_specialized_function_types()
+
+ for base_entry in base_scope.cfunc_entries:
+ cname = base_entry.cname
+ var_entry = base_entry.as_variable
+ is_builtin = var_entry and var_entry.is_builtin
+ if not is_builtin:
+ cname = adapt(cname)
+ entry = self.add_cfunction(base_entry.name, base_entry.type,
+ base_entry.pos, cname,
+ base_entry.visibility, base_entry.func_modifiers, inherited=True)
+ entry.is_inherited = 1
+ if base_entry.is_final_cmethod:
+ entry.is_final_cmethod = True
+ entry.is_inline_cmethod = base_entry.is_inline_cmethod
+ if (self.parent_scope == base_scope.parent_scope or
+ entry.is_inline_cmethod):
+ entry.final_func_cname = base_entry.final_func_cname
+ if is_builtin:
+ entry.is_builtin_cmethod = True
+ entry.as_variable = var_entry
+ if base_entry.utility_code:
+ entry.utility_code = base_entry.utility_code
+
+
+class CppClassScope(Scope):
+ # Namespace of a C++ class.
+
+ is_cpp_class_scope = 1
+
+ default_constructor = None
+ type = None
+
+ def __init__(self, name, outer_scope, templates=None):
+ Scope.__init__(self, name, outer_scope, None)
+ self.directives = outer_scope.directives
+ self.inherited_var_entries = []
+ if templates is not None:
+ for T in templates:
+ template_entry = self.declare(
+ T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
+ template_entry.is_type = 1
+
+ def declare_var(self, name, type, pos,
+ cname = None, visibility = 'extern',
+ api = 0, in_pxd = 0, is_cdef = 0, defining = 0):
+ # Add an entry for an attribute.
+ if not cname:
+ cname = name
+ entry = self.lookup_here(name)
+ if defining and entry is not None:
+ if entry.type.same_as(type):
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
+ elif type.is_cfunction and type.compatible_signature_with(entry.type):
+ entry.type = type
+ else:
+ error(pos, "Function signature does not match previous declaration")
+ else:
+ entry = self.declare(name, cname, type, pos, visibility)
+ entry.is_variable = 1
+ if type.is_cfunction and self.type:
+ if not self.type.get_fused_types():
+ entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname)
+ if name != "this" and (defining or name != "<init>"):
+ self.var_entries.append(entry)
+ return entry
+
+ def declare_cfunction(self, name, type, pos,
+ cname=None, visibility='extern', api=0, in_pxd=0,
+ defining=0, modifiers=(), utility_code=None, overridable=False):
+ class_name = self.name.split('::')[-1]
+ if name in (class_name, '__init__') and cname is None:
+ cname = "%s__init__%s" % (Naming.func_prefix, class_name)
+ name = '<init>'
+ type.return_type = PyrexTypes.CVoidType()
+            # This is called by the actual constructor, but we need to support
+            # arguments that cannot be passed by value.
+ type.original_args = type.args
+ def maybe_ref(arg):
+ if arg.type.is_cpp_class and not arg.type.is_reference:
+ return PyrexTypes.CFuncTypeArg(
+ arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
+ else:
+ return arg
+ type.args = [maybe_ref(arg) for arg in type.args]
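+            # Illustrative effect: a constructor declared as 'Foo(Bar b)'
+            # is registered as if it took 'Bar &b', so that arguments
+            # which must not be copied can still be forwarded.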
+ elif name == '__dealloc__' and cname is None:
+ cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
+ name = '<del>'
+ type.return_type = PyrexTypes.CVoidType()
+ if name in ('<init>', '<del>') and type.nogil:
+ for base in self.type.base_classes:
+ base_entry = base.scope.lookup(name)
+ if base_entry and not base_entry.type.nogil:
+ error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
+ error(base_entry.pos, "Base constructor defined here.")
+ prev_entry = self.lookup_here(name)
+ entry = self.declare_var(name, type, pos,
+ defining=defining,
+ cname=cname, visibility=visibility)
+ if prev_entry and not defining:
+ entry.overloaded_alternatives = prev_entry.all_alternatives()
+ entry.utility_code = utility_code
+ type.entry = entry
+ return entry
+
+ def declare_inherited_cpp_attributes(self, base_class):
+ base_scope = base_class.scope
+ template_type = base_class
+ while getattr(template_type, 'template_type', None):
+ template_type = template_type.template_type
+ if getattr(template_type, 'templates', None):
+ base_templates = [T.name for T in template_type.templates]
+ else:
+ base_templates = ()
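+        # 'base_templates' collects the base class's template parameter
+        # names; their type entries are skipped further below so they are
+        # not re-declared in this scope.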
+ # Declare entries for all the C++ attributes of an
+ # inherited type, with cnames modified appropriately
+ # to work with this type.
+ for base_entry in \
+ base_scope.inherited_var_entries + base_scope.var_entries:
+ #constructor/destructor is not inherited
+ if base_entry.name in ("<init>", "<del>"):
+ continue
+ #print base_entry.name, self.entries
+ if base_entry.name in self.entries:
+                pass  # FIXME: is there anything to do in this case?
+ entry = self.declare(base_entry.name, base_entry.cname,
+ base_entry.type, None, 'extern')
+ entry.is_variable = 1
+ entry.is_inherited = 1
+ self.inherited_var_entries.append(entry)
+ for base_entry in base_scope.cfunc_entries:
+ entry = self.declare_cfunction(base_entry.name, base_entry.type,
+ base_entry.pos, base_entry.cname,
+ base_entry.visibility, api=0,
+ modifiers=base_entry.func_modifiers,
+ utility_code=base_entry.utility_code)
+ entry.is_inherited = 1
+ for base_entry in base_scope.type_entries:
+ if base_entry.name not in base_templates:
+ entry = self.declare_type(base_entry.name, base_entry.type,
+ base_entry.pos, base_entry.cname,
+ base_entry.visibility)
+ entry.is_inherited = 1
+
+ def specialize(self, values, type_entry):
+ scope = CppClassScope(self.name, self.outer_scope)
+ scope.type = type_entry
+ for entry in self.entries.values():
+ if entry.is_type:
+ scope.declare_type(entry.name,
+ entry.type.specialize(values),
+ entry.pos,
+ entry.cname,
+ template=1)
+ elif entry.type.is_cfunction:
+ for e in entry.all_alternatives():
+ scope.declare_cfunction(e.name,
+ e.type.specialize(values),
+ e.pos,
+ e.cname,
+ utility_code=e.utility_code)
+ else:
+ scope.declare_var(entry.name,
+ entry.type.specialize(values),
+ entry.pos,
+ entry.cname,
+ entry.visibility)
+
+ return scope
+
+
+class PropertyScope(Scope):
+ # Scope holding the __get__, __set__ and __del__ methods for
+ # a property of an extension type.
+ #
+ # parent_type PyExtensionType The type to which the property belongs
+
+ is_property_scope = 1
+
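+    # For orientation, this scope backs the legacy property syntax
+    # (illustrative user code):
+    #
+    #     cdef class C:
+    #         property x:
+    #             def __get__(self): ...
+    #             def __set__(self, value): ...
+    #             def __del__(self): ...
+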
+ def declare_pyfunction(self, name, pos, allow_redefine=False):
+ # Add an entry for a method.
+ signature = get_property_accessor_signature(name)
+ if signature:
+ entry = self.declare(name, name, py_object_type, pos, 'private')
+ entry.is_special = 1
+ entry.signature = signature
+ return entry
+ else:
+ error(pos, "Only __get__, __set__ and __del__ methods allowed "
+ "in a property declaration")
+ return None
+
+
+class CConstScope(Scope):
+
+ def __init__(self, const_base_type_scope):
+ Scope.__init__(
+ self,
+ 'const_' + const_base_type_scope.name,
+ const_base_type_scope.outer_scope,
+ const_base_type_scope.parent_scope)
+ self.const_base_type_scope = const_base_type_scope
+
+ def lookup_here(self, name):
+ entry = self.const_base_type_scope.lookup_here(name)
+ if entry is not None:
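+            # Work on a shallow copy so that the base type scope's entry
+            # keeps its original, non-const type.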
+ entry = copy.copy(entry)
+ entry.type = PyrexTypes.c_const_type(entry.type)
+ return entry
+
+class TemplateScope(Scope):
+ def __init__(self, name, outer_scope):
+ Scope.__init__(self, name, outer_scope, None)
+ self.directives = outer_scope.directives
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestBuffer.py b/contrib/tools/cython/Cython/Compiler/Tests/TestBuffer.py
new file mode 100644
index 0000000000..1f69d96524
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestBuffer.py
@@ -0,0 +1,105 @@
+from Cython.TestUtils import CythonTest
+import Cython.Compiler.Errors as Errors
+from Cython.Compiler.Nodes import *
+from Cython.Compiler.ParseTreeTransforms import *
+from Cython.Compiler.Buffer import *
+
+
+class TestBufferParsing(CythonTest):
+ # First, we only test the raw parser, i.e.
+ # the number and contents of arguments are NOT checked.
+ # However "dtype"/the first positional argument is special-cased
+ # to parse a type argument rather than an expression
+
+ def parse(self, s):
+ return self.should_not_fail(lambda: self.fragment(s)).root
+
+ def not_parseable(self, expected_error, s):
+ e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
+ self.assertEqual(expected_error, e.message_only)
+
+ def test_basic(self):
+ t = self.parse(u"cdef object[float, 4, ndim=2, foo=foo] x")
+ bufnode = t.stats[0].base_type
+ self.assertTrue(isinstance(bufnode, TemplatedTypeNode))
+ self.assertEqual(2, len(bufnode.positional_args))
+# print bufnode.dump()
+ # should put more here...
+
+ def test_type_pos(self):
+ self.parse(u"cdef object[short unsigned int, 3] x")
+
+ def test_type_keyword(self):
+ self.parse(u"cdef object[foo=foo, dtype=short unsigned int] x")
+
+ def test_pos_after_key(self):
+ self.not_parseable("Non-keyword arg following keyword arg",
+ u"cdef object[foo=1, 2] x")
+
+
+# See also tests/error/e_bufaccess.pyx and tests/run/bufaccess.pyx
+# THESE TESTS ARE NOW DISABLED: the code they tested has largely been
+# refactored away.
+class TestBufferOptions(CythonTest):
+ # Tests the full parsing of the options within the brackets
+
+ def nonfatal_error(self, error):
+        # We pass 'self' as the context to the transform so that this method traps errors.
+ self.error = error
+ self.assertTrue(self.expect_error)
+
+ def parse_opts(self, opts, expect_error=False):
+ assert opts != ""
+ s = u"def f():\n cdef object[%s] x" % opts
+ self.expect_error = expect_error
+ root = self.fragment(s, pipeline=[NormalizeTree(self), PostParse(self)]).root
+ if not expect_error:
+ vardef = root.stats[0].body.stats[0]
+ assert isinstance(vardef, CVarDefNode) # use normal assert as this is to validate the test code
+ buftype = vardef.base_type
+ self.assertTrue(isinstance(buftype, TemplatedTypeNode))
+ self.assertTrue(isinstance(buftype.base_type_node, CSimpleBaseTypeNode))
+ self.assertEqual(u"object", buftype.base_type_node.name)
+ return buftype
+ else:
+ self.assertTrue(len(root.stats[0].body.stats) == 0)
+
+ def non_parse(self, expected_err, opts):
+ self.parse_opts(opts, expect_error=True)
+# e = self.should_fail(lambda: self.parse_opts(opts))
+ self.assertEqual(expected_err, self.error.message_only)
+
+ def __test_basic(self):
+ buf = self.parse_opts(u"unsigned short int, 3")
+ self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
+ self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
+ self.assertEqual(3, buf.ndim)
+
+ def __test_dict(self):
+ buf = self.parse_opts(u"ndim=3, dtype=unsigned short int")
+ self.assertTrue(isinstance(buf.dtype_node, CSimpleBaseTypeNode))
+ self.assertTrue(buf.dtype_node.signed == 0 and buf.dtype_node.longness == -1)
+ self.assertEqual(3, buf.ndim)
+
+ def __test_ndim(self):
+ self.parse_opts(u"int, 2")
+ self.non_parse(ERR_BUF_NDIM, u"int, 'a'")
+ self.non_parse(ERR_BUF_NDIM, u"int, -34")
+
+ def __test_use_DEF(self):
+ t = self.fragment(u"""
+ DEF ndim = 3
+ def f():
+ cdef object[int, ndim] x
+ cdef object[ndim=ndim, dtype=int] y
+ """, pipeline=[NormalizeTree(self), PostParse(self)]).root
+ stats = t.stats[0].body.stats
+ self.assertTrue(stats[0].base_type.ndim == 3)
+ self.assertTrue(stats[1].base_type.ndim == 3)
+
+ # add exotic and impossible combinations as they come along...
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
+
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py b/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py
new file mode 100644
index 0000000000..bd31da0007
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestCmdLine.py
@@ -0,0 +1,170 @@
+
+import sys
+import re
+from unittest import TestCase
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO # doesn't accept 'str' in Py2
+
+from .. import Options
+from ..CmdLine import parse_command_line
+
+
+def check_global_options(expected_options, white_list=[]):
+ """
+    Return an error message, or "" if the check is OK.
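+
+    Illustrative usage (assumes the defaults have not been modified):
+
+        >>> check_global_options({'annotate': Options.annotate})
+        ''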
+ """
+ no_value = object()
+ for name, orig_value in expected_options.items():
+ if name not in white_list:
+ if getattr(Options, name, no_value) != orig_value:
+ return "error in option " + name
+ return ""
+
+
+class CmdLineParserTest(TestCase):
+ def setUp(self):
+ backup = {}
+ for name, value in vars(Options).items():
+ backup[name] = value
+ self._options_backup = backup
+
+ def tearDown(self):
+ no_value = object()
+ for name, orig_value in self._options_backup.items():
+ if getattr(Options, name, no_value) != orig_value:
+ setattr(Options, name, orig_value)
+
+ def check_default_global_options(self, white_list=[]):
+ self.assertEqual(check_global_options(self._options_backup, white_list), "")
+
+ def check_default_options(self, options, white_list=[]):
+ from ..Main import CompilationOptions, default_options
+ default_options = CompilationOptions(default_options)
+ no_value = object()
+ for name in default_options.__dict__.keys():
+ if name not in white_list:
+ self.assertEqual(getattr(options, name, no_value), getattr(default_options, name), msg="error in option " + name)
+
+ def test_short_options(self):
+ options, sources = parse_command_line([
+ '-V', '-l', '-+', '-t', '-v', '-v', '-v', '-p', '-D', '-a', '-3',
+ ])
+ self.assertFalse(sources)
+ self.assertTrue(options.show_version)
+ self.assertTrue(options.use_listing_file)
+ self.assertTrue(options.cplus)
+ self.assertTrue(options.timestamps)
+ self.assertTrue(options.verbose >= 3)
+ self.assertTrue(Options.embed_pos_in_docstring)
+ self.assertFalse(Options.docstrings)
+ self.assertTrue(Options.annotate)
+ self.assertEqual(options.language_level, 3)
+
+ options, sources = parse_command_line([
+ '-f', '-2', 'source.pyx',
+ ])
+ self.assertTrue(sources)
+ self.assertTrue(len(sources) == 1)
+ self.assertFalse(options.timestamps)
+ self.assertEqual(options.language_level, 2)
+
+ def test_long_options(self):
+ options, sources = parse_command_line([
+ '--version', '--create-listing', '--cplus', '--embed', '--timestamps',
+ '--verbose', '--verbose', '--verbose',
+ '--embed-positions', '--no-docstrings', '--annotate', '--lenient',
+ ])
+ self.assertFalse(sources)
+ self.assertTrue(options.show_version)
+ self.assertTrue(options.use_listing_file)
+ self.assertTrue(options.cplus)
+ self.assertEqual(Options.embed, 'main')
+ self.assertTrue(options.timestamps)
+ self.assertTrue(options.verbose >= 3)
+ self.assertTrue(Options.embed_pos_in_docstring)
+ self.assertFalse(Options.docstrings)
+ self.assertTrue(Options.annotate)
+ self.assertFalse(Options.error_on_unknown_names)
+ self.assertFalse(Options.error_on_uninitialized)
+
+ options, sources = parse_command_line([
+ '--force', 'source.pyx',
+ ])
+ self.assertTrue(sources)
+ self.assertTrue(len(sources) == 1)
+ self.assertFalse(options.timestamps)
+
+ def test_options_with_values(self):
+ options, sources = parse_command_line([
+ '--embed=huhu',
+ '-I/test/include/dir1', '--include-dir=/test/include/dir2',
+ '--include-dir', '/test/include/dir3',
+ '--working=/work/dir',
+ 'source.pyx',
+ '--output-file=/output/dir',
+ '--pre-import=/pre/import',
+ '--cleanup=3',
+ '--annotate-coverage=cov.xml',
+ '--gdb-outdir=/gdb/outdir',
+ '--directive=wraparound=false',
+ ])
+ self.assertEqual(sources, ['source.pyx'])
+ self.assertEqual(Options.embed, 'huhu')
+ self.assertEqual(options.include_path, ['/test/include/dir1', '/test/include/dir2', '/test/include/dir3'])
+ self.assertEqual(options.working_path, '/work/dir')
+ self.assertEqual(options.output_file, '/output/dir')
+ self.assertEqual(Options.pre_import, '/pre/import')
+ self.assertEqual(Options.generate_cleanup_code, 3)
+ self.assertTrue(Options.annotate)
+ self.assertEqual(Options.annotate_coverage_xml, 'cov.xml')
+ self.assertTrue(options.gdb_debug)
+ self.assertEqual(options.output_dir, '/gdb/outdir')
+
+ def test_module_name(self):
+ options, sources = parse_command_line([
+ 'source.pyx'
+ ])
+ self.assertEqual(options.module_name, None)
+ self.check_default_global_options()
+ self.check_default_options(options)
+ options, sources = parse_command_line([
+ '--module-name', 'foo.bar',
+ 'source.pyx'
+ ])
+ self.assertEqual(options.module_name, 'foo.bar')
+ self.check_default_global_options()
+ self.check_default_options(options, ['module_name'])
+
+ def test_errors(self):
+ def error(args, regex=None):
+ old_stderr = sys.stderr
+ stderr = sys.stderr = StringIO()
+ try:
+ self.assertRaises(SystemExit, parse_command_line, list(args))
+ finally:
+ sys.stderr = old_stderr
+ msg = stderr.getvalue().strip()
+ self.assertTrue(msg)
+ if regex:
+ self.assertTrue(re.search(regex, msg),
+ '"%s" does not match search "%s"' %
+ (msg, regex))
+
+ error(['-1'],
+ 'Unknown compiler flag: -1')
+ error(['-I'])
+ error(['--version=-a'])
+ error(['--version=--annotate=true'])
+ error(['--working'])
+ error(['--verbose=1'])
+ error(['--cleanup'])
+ error(['--debug-disposal-code-wrong-name', 'file3.pyx'],
+ "Unknown debug flag: debug_disposal_code_wrong_name")
+ error(['--module-name', 'foo.pyx'])
+ error(['--module-name', 'foo.bar'])
+ error(['--module-name', 'foo.bar', 'foo.pyx', 'bar.pyx'],
+ "Only one source file allowed when using --module-name")
+ error(['--module-name', 'foo.bar', '--timestamps', 'foo.pyx'],
+ "Cannot use --module-name with --timestamps")
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py b/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py
new file mode 100644
index 0000000000..443551ab88
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestFlowControl.py
@@ -0,0 +1,68 @@
+
+from __future__ import absolute_import
+
+from copy import deepcopy
+from unittest import TestCase
+
+from Cython.Compiler.FlowControl import (
+ NameAssignment, StaticAssignment, Argument, NameDeletion)
+
+
+class FakeType(object):
+ is_pyobject = True
+
+
+class FakeNode(object):
+ pos = ('filename.pyx', 1, 2)
+ cf_state = None
+ type = FakeType()
+
+ def infer_type(self, scope):
+ return self.type
+
+
+class FakeEntry(object):
+ type = FakeType()
+
+
+class TestGraph(TestCase):
+ def test_deepcopy(self):
+ lhs, rhs = FakeNode(), FakeNode()
+ entry = FakeEntry()
+ entry.pos = lhs.pos
+
+ name_ass = NameAssignment(lhs, rhs, entry)
+ ass = deepcopy(name_ass)
+ self.assertTrue(ass.lhs)
+ self.assertTrue(ass.rhs)
+ self.assertTrue(ass.entry)
+ self.assertEqual(ass.pos, name_ass.pos)
+ self.assertFalse(ass.is_arg)
+ self.assertFalse(ass.is_deletion)
+
+ static_ass = StaticAssignment(entry)
+ ass = deepcopy(static_ass)
+ self.assertTrue(ass.lhs)
+ self.assertTrue(ass.rhs)
+ self.assertTrue(ass.entry)
+ self.assertEqual(ass.pos, static_ass.pos)
+ self.assertFalse(ass.is_arg)
+ self.assertFalse(ass.is_deletion)
+
+ arg_ass = Argument(lhs, rhs, entry)
+ ass = deepcopy(arg_ass)
+ self.assertTrue(ass.lhs)
+ self.assertTrue(ass.rhs)
+ self.assertTrue(ass.entry)
+ self.assertEqual(ass.pos, arg_ass.pos)
+ self.assertTrue(ass.is_arg)
+ self.assertFalse(ass.is_deletion)
+
+ name_del = NameDeletion(lhs, entry)
+ ass = deepcopy(name_del)
+ self.assertTrue(ass.lhs)
+ self.assertTrue(ass.rhs)
+ self.assertTrue(ass.entry)
+ self.assertEqual(ass.pos, name_del.pos)
+ self.assertFalse(ass.is_arg)
+ self.assertTrue(ass.is_deletion)
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py b/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py
new file mode 100644
index 0000000000..3dddc960b3
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestGrammar.py
@@ -0,0 +1,129 @@
+# mode: run
+# tag: syntax
+
+"""
+Uses TreeFragment to test invalid syntax.
+"""
+
+from __future__ import absolute_import
+
+from ...TestUtils import CythonTest
+from ..Errors import CompileError
+from .. import ExprNodes
+
+# Copied from CPython's test_grammar.py
+VALID_UNDERSCORE_LITERALS = [
+ '0_0_0',
+ '4_2',
+ '1_0000_0000',
+ '0b1001_0100',
+ '0xffff_ffff',
+ '0o5_7_7',
+ '1_00_00.5',
+ '1_00_00.5j',
+ '1_00_00.5e5',
+ '1_00_00j',
+ '1_00_00e5_1',
+ '1e1_0',
+ '.1_4',
+ '.1_4e1',
+ '.1_4j',
+]
+
+# Copied from CPython's test_grammar.py
+INVALID_UNDERSCORE_LITERALS = [
+ # Trailing underscores:
+ '0_',
+ '42_',
+ '1.4j_',
+ '0b1_',
+ '0xf_',
+ '0o5_',
+ # Underscores in the base selector:
+ '0_b0',
+ '0_xf',
+ '0_o5',
+ # Underscore right after the base selector:
+ '0b_0',
+ '0x_f',
+ '0o_5',
+ # Old-style octal, still disallowed:
+ #'0_7',
+ #'09_99',
+ # Special case with exponent:
+ '0 if 1_Else 1',
+ # Underscore right before a dot:
+ '1_.4',
+ '1_.4j',
+ # Underscore right after a dot:
+ '1._4',
+ '1._4j',
+ '._5',
+ # Underscore right after a sign:
+ '1.0e+_1',
+ # Multiple consecutive underscores:
+ '4_______2',
+ '0.1__4',
+ '0b1001__0100',
+ '0xffff__ffff',
+ '0o5__77',
+ '1e1__0',
+ # Underscore right before j:
+ '1.4_j',
+ '1.4e5_j',
+ # Underscore right before e:
+ '1_e1',
+ '1.4_e1',
+ # Underscore right after e:
+ '1e_1',
+ '1.4e_1',
+ # Whitespace in literals
+ '1_ 2',
+ '1 _2',
+ '1_2.2_ 1',
+ '1_2.2 _1',
+ '1_2e _1',
+ '1_2e2 _1',
+ '1_2e 2_1',
+]
+
+
+class TestGrammar(CythonTest):
+
+ def test_invalid_number_literals(self):
+ for literal in INVALID_UNDERSCORE_LITERALS:
+ for expression in ['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']:
+ code = 'x = ' + expression % literal
+ try:
+ self.fragment(u'''\
+ # cython: language_level=3
+ ''' + code)
+ except CompileError as exc:
+ assert code in [s.strip() for s in str(exc).splitlines()], str(exc)
+ else:
+ assert False, "Invalid Cython code '%s' failed to raise an exception" % code
+
+ def test_valid_number_literals(self):
+ for literal in VALID_UNDERSCORE_LITERALS:
+ for i, expression in enumerate(['%s', '1 + %s', '%s + 1', '2 * %s', '%s * 2']):
+ code = 'x = ' + expression % literal
+ node = self.fragment(u'''\
+ # cython: language_level=3
+ ''' + code).root
+ assert node is not None
+
+ literal_node = node.stats[0].rhs # StatListNode([SingleAssignmentNode('x', expr)])
+ if i > 0:
+ # Add/MulNode() -> literal is first or second operand
+ literal_node = literal_node.operand2 if i % 2 else literal_node.operand1
+ if 'j' in literal or 'J' in literal:
+ assert isinstance(literal_node, ExprNodes.ImagNode)
+                elif ('.' in literal or 'e' in literal or 'E' in literal) and not ('0x' in literal or '0X' in literal):
+ assert isinstance(literal_node, ExprNodes.FloatNode)
+ else:
+ assert isinstance(literal_node, ExprNodes.IntNode)
+
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py b/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py
new file mode 100644
index 0000000000..3792f26e99
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py
@@ -0,0 +1,71 @@
+from Cython.TestUtils import CythonTest
+import Cython.Compiler.Errors as Errors
+from Cython.Compiler.Nodes import *
+from Cython.Compiler.ParseTreeTransforms import *
+from Cython.Compiler.Buffer import *
+
+
+class TestMemviewParsing(CythonTest):
+
+ def parse(self, s):
+ return self.should_not_fail(lambda: self.fragment(s)).root
+
+ def not_parseable(self, expected_error, s):
+ e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
+ self.assertEqual(expected_error, e.message_only)
+
+ def test_default_1dim(self):
+ self.parse(u"cdef int[:] x")
+ self.parse(u"cdef short int[:] x")
+
+ def test_default_ndim(self):
+ self.parse(u"cdef int[:,:,:,:,:] x")
+ self.parse(u"cdef unsigned long int[:,:,:,:,:] x")
+ self.parse(u"cdef unsigned int[:,:,:,:,:] x")
+
+ def test_zero_offset(self):
+ self.parse(u"cdef long double[0:] x")
+ self.parse(u"cdef int[0:] x")
+
+ def test_zero_offset_ndim(self):
+ self.parse(u"cdef int[0:,0:,0:,0:] x")
+
+ def test_def_arg(self):
+ self.parse(u"def foo(int[:,:] x): pass")
+
+ def test_cdef_arg(self):
+ self.parse(u"cdef foo(int[:,:] x): pass")
+
+ def test_general_slice(self):
+ self.parse(u'cdef float[::ptr, ::direct & contig, 0::full & strided] x')
+
+ def test_non_slice_memview(self):
+ self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
+ u"cdef double[:foo, bar] x")
+ self.not_parseable(u"An axis specification in memoryview declaration does not have a ':'.",
+ u"cdef double[0:foo, bar] x")
+
+ def test_basic(self):
+ t = self.parse(u"cdef int[:] x")
+ memv_node = t.stats[0].base_type
+ self.assertTrue(isinstance(memv_node, MemoryViewSliceTypeNode))
+
+ # we also test other similar declarations (buffers, anonymous C arrays)
+ # since the parsing has to distinguish between them.
+
+ def disable_test_no_buf_arg(self): # TODO
+ self.not_parseable(u"Expected ']'",
+ u"cdef extern foo(object[int, ndim=2])")
+
+ def disable_test_parse_sizeof(self): # TODO
+ self.parse(u"sizeof(int[NN])")
+ self.parse(u"sizeof(int[])")
+ self.parse(u"sizeof(int[][NN])")
+ self.not_parseable(u"Expected an identifier or literal",
+ u"sizeof(int[:NN])")
+ self.not_parseable(u"Expected ']'",
+ u"sizeof(foo[dtype=bar]")
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestParseTreeTransforms.py b/contrib/tools/cython/Cython/Compiler/Tests/TestParseTreeTransforms.py
new file mode 100644
index 0000000000..8a16f98ccc
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestParseTreeTransforms.py
@@ -0,0 +1,289 @@
+import os.path
+import unittest
+
+from Cython.TestUtils import TransformTest
+from Cython.Compiler.ParseTreeTransforms import *
+from Cython.Compiler.ParseTreeTransforms import _calculate_pickle_checksums
+from Cython.Compiler.Nodes import *
+from Cython.Compiler import Main, Symtab
+
+
+class TestNormalizeTree(TransformTest):
+ def test_parserbehaviour_is_what_we_coded_for(self):
+ t = self.fragment(u"if x: y").root
+ self.assertLines(u"""
+(root): StatListNode
+ stats[0]: IfStatNode
+ if_clauses[0]: IfClauseNode
+ condition: NameNode
+ body: ExprStatNode
+ expr: NameNode
+""", self.treetypes(t))
+
+ def test_wrap_singlestat(self):
+ t = self.run_pipeline([NormalizeTree(None)], u"if x: y")
+ self.assertLines(u"""
+(root): StatListNode
+ stats[0]: IfStatNode
+ if_clauses[0]: IfClauseNode
+ condition: NameNode
+ body: StatListNode
+ stats[0]: ExprStatNode
+ expr: NameNode
+""", self.treetypes(t))
+
+ def test_wrap_multistat(self):
+ t = self.run_pipeline([NormalizeTree(None)], u"""
+ if z:
+ x
+ y
+ """)
+ self.assertLines(u"""
+(root): StatListNode
+ stats[0]: IfStatNode
+ if_clauses[0]: IfClauseNode
+ condition: NameNode
+ body: StatListNode
+ stats[0]: ExprStatNode
+ expr: NameNode
+ stats[1]: ExprStatNode
+ expr: NameNode
+""", self.treetypes(t))
+
+ def test_statinexpr(self):
+ t = self.run_pipeline([NormalizeTree(None)], u"""
+ a, b = x, y
+ """)
+ self.assertLines(u"""
+(root): StatListNode
+ stats[0]: SingleAssignmentNode
+ lhs: TupleNode
+ args[0]: NameNode
+ args[1]: NameNode
+ rhs: TupleNode
+ args[0]: NameNode
+ args[1]: NameNode
+""", self.treetypes(t))
+
+ def test_wrap_offagain(self):
+ t = self.run_pipeline([NormalizeTree(None)], u"""
+ x
+ y
+ if z:
+ x
+ """)
+ self.assertLines(u"""
+(root): StatListNode
+ stats[0]: ExprStatNode
+ expr: NameNode
+ stats[1]: ExprStatNode
+ expr: NameNode
+ stats[2]: IfStatNode
+ if_clauses[0]: IfClauseNode
+ condition: NameNode
+ body: StatListNode
+ stats[0]: ExprStatNode
+ expr: NameNode
+""", self.treetypes(t))
+
+
+ def test_pass_eliminated(self):
+ t = self.run_pipeline([NormalizeTree(None)], u"pass")
+ self.assertTrue(len(t.stats) == 0)
+
+class TestWithTransform(object): # (TransformTest): # Disabled!
+
+ def test_simplified(self):
+ t = self.run_pipeline([WithTransform(None)], u"""
+ with x:
+ y = z ** 3
+ """)
+
+ self.assertCode(u"""
+
+ $0_0 = x
+ $0_2 = $0_0.__exit__
+ $0_0.__enter__()
+ $0_1 = True
+ try:
+ try:
+ $1_0 = None
+ y = z ** 3
+ except:
+ $0_1 = False
+ if (not $0_2($1_0)):
+ raise
+ finally:
+ if $0_1:
+ $0_2(None, None, None)
+
+ """, t)
+
+ def test_basic(self):
+ t = self.run_pipeline([WithTransform(None)], u"""
+ with x as y:
+ y = z ** 3
+ """)
+ self.assertCode(u"""
+
+ $0_0 = x
+ $0_2 = $0_0.__exit__
+ $0_3 = $0_0.__enter__()
+ $0_1 = True
+ try:
+ try:
+ $1_0 = None
+ y = $0_3
+ y = z ** 3
+ except:
+ $0_1 = False
+ if (not $0_2($1_0)):
+ raise
+ finally:
+ if $0_1:
+ $0_2(None, None, None)
+
+ """, t)
+
+
+class TestInterpretCompilerDirectives(TransformTest):
+ """
+ This class tests the parallel directives AST-rewriting and importing.
+ """
+
+ # Test the parallel directives (c)importing
+
+ import_code = u"""
+ cimport cython.parallel
+ cimport cython.parallel as par
+ from cython cimport parallel as par2
+ from cython cimport parallel
+
+ from cython.parallel cimport threadid as tid
+ from cython.parallel cimport threadavailable as tavail
+ from cython.parallel cimport prange
+ """
+
+ expected_directives_dict = {
+ u'cython.parallel': u'cython.parallel',
+ u'par': u'cython.parallel',
+ u'par2': u'cython.parallel',
+ u'parallel': u'cython.parallel',
+
+ u"tid": u"cython.parallel.threadid",
+ u"tavail": u"cython.parallel.threadavailable",
+ u"prange": u"cython.parallel.prange",
+ }
+
+
+ def setUp(self):
+ super(TestInterpretCompilerDirectives, self).setUp()
+
+ compilation_options = Main.CompilationOptions(Main.default_options)
+ ctx = compilation_options.create_context()
+
+ transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives)
+ transform.module_scope = Symtab.ModuleScope('__main__', None, ctx)
+ self.pipeline = [transform]
+
+ self.debug_exception_on_error = DebugFlags.debug_exception_on_error
+
+ def tearDown(self):
+ DebugFlags.debug_exception_on_error = self.debug_exception_on_error
+
+ def test_parallel_directives_cimports(self):
+ self.run_pipeline(self.pipeline, self.import_code)
+ parallel_directives = self.pipeline[0].parallel_directives
+ self.assertEqual(parallel_directives, self.expected_directives_dict)
+
+ def test_parallel_directives_imports(self):
+ self.run_pipeline(self.pipeline,
+ self.import_code.replace(u'cimport', u'import'))
+ parallel_directives = self.pipeline[0].parallel_directives
+ self.assertEqual(parallel_directives, self.expected_directives_dict)
+
+
+# TODO: Re-enable once they're more robust.
+if False:
+ from Cython.Debugger import DebugWriter
+ from Cython.Debugger.Tests.TestLibCython import DebuggerTestCase
+else:
+ # skip test, don't let it inherit unittest.TestCase
+ DebuggerTestCase = object
+
+
+class TestDebugTransform(DebuggerTestCase):
+
+ def elem_hasattrs(self, elem, attrs):
+ return all(attr in elem.attrib for attr in attrs)
+
+ def test_debug_info(self):
+ try:
+ assert os.path.exists(self.debug_dest)
+
+ t = DebugWriter.etree.parse(self.debug_dest)
+ # the xpath of the standard ElementTree is primitive, don't use
+ # anything fancy
+ L = list(t.find('/Module/Globals'))
+ assert L
+ xml_globals = dict((e.attrib['name'], e.attrib['type']) for e in L)
+ self.assertEqual(len(L), len(xml_globals))
+
+ L = list(t.find('/Module/Functions'))
+ assert L
+ xml_funcs = dict((e.attrib['qualified_name'], e) for e in L)
+ self.assertEqual(len(L), len(xml_funcs))
+
+ # test globals
+ self.assertEqual('CObject', xml_globals.get('c_var'))
+ self.assertEqual('PythonObject', xml_globals.get('python_var'))
+
+ # test functions
+ funcnames = ('codefile.spam', 'codefile.ham', 'codefile.eggs',
+ 'codefile.closure', 'codefile.inner')
+ required_xml_attrs = 'name', 'cname', 'qualified_name'
+ assert all(f in xml_funcs for f in funcnames)
+ spam, ham, eggs = [xml_funcs[funcname] for funcname in funcnames]
+
+ self.assertEqual(spam.attrib['name'], 'spam')
+ self.assertNotEqual('spam', spam.attrib['cname'])
+ assert self.elem_hasattrs(spam, required_xml_attrs)
+
+ # test locals of functions
+ spam_locals = list(spam.find('Locals'))
+ assert spam_locals
+ spam_locals.sort(key=lambda e: e.attrib['name'])
+ names = [e.attrib['name'] for e in spam_locals]
+ self.assertEqual(list('abcd'), names)
+ assert self.elem_hasattrs(spam_locals[0], required_xml_attrs)
+
+ # test arguments of functions
+ spam_arguments = list(spam.find('Arguments'))
+ assert spam_arguments
+ self.assertEqual(1, len(list(spam_arguments)))
+
+ # test step-into functions
+ step_into = spam.find('StepIntoFunctions')
+ spam_stepinto = [x.attrib['name'] for x in step_into]
+ assert spam_stepinto
+ self.assertEqual(2, len(spam_stepinto))
+ assert 'puts' in spam_stepinto
+ assert 'some_c_function' in spam_stepinto
+ except:
+ f = open(self.debug_dest)
+ try:
+ print(f.read())
+ finally:
+ f.close()
+ raise
+
+
+class TestAnalyseDeclarationsTransform(unittest.TestCase):
+ def test_calculate_pickle_checksums(self):
+ checksums = _calculate_pickle_checksums(['member1', 'member2', 'member3'])
+ assert 2 <= len(checksums) <= 3, checksums # expecting ['0xc0af380' (MD5), '0x0c75bd4', '0xa7a7b94']
+
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py b/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py
new file mode 100644
index 0000000000..166bb225b9
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestSignatureMatching.py
@@ -0,0 +1,73 @@
+import unittest
+
+from Cython.Compiler import PyrexTypes as pt
+from Cython.Compiler.ExprNodes import NameNode
+from Cython.Compiler.PyrexTypes import CFuncTypeArg
+
+def cfunctype(*arg_types):
+ return pt.CFuncType(pt.c_int_type,
+ [ CFuncTypeArg("name", arg_type, None) for arg_type in arg_types ])
+
+def cppclasstype(name, base_classes):
+ return pt.CppClassType(name, None, 'CPP_'+name, base_classes)
+
+class SignatureMatcherTest(unittest.TestCase):
+ """
+ Test the signature matching algorithm for overloaded signatures.
+ """
+ def assertMatches(self, expected_type, arg_types, functions):
+ match = pt.best_match(arg_types, functions)
+ if expected_type is not None:
+ self.assertNotEqual(None, match)
+ self.assertEqual(expected_type, match.type)
+
+ def test_cpp_reference_single_arg(self):
+ function_types = [
+ cfunctype(pt.CReferenceType(pt.c_int_type)),
+ cfunctype(pt.CReferenceType(pt.c_long_type)),
+ cfunctype(pt.CReferenceType(pt.c_double_type)),
+ ]
+
+ functions = [ NameNode(None, type=t) for t in function_types ]
+ self.assertMatches(function_types[0], [pt.c_int_type], functions)
+ self.assertMatches(function_types[1], [pt.c_long_type], functions)
+ self.assertMatches(function_types[2], [pt.c_double_type], functions)
+
+ def test_cpp_reference_two_args(self):
+ function_types = [
+ cfunctype(
+ pt.CReferenceType(pt.c_int_type), pt.CReferenceType(pt.c_long_type)),
+ cfunctype(
+ pt.CReferenceType(pt.c_long_type), pt.CReferenceType(pt.c_long_type)),
+ ]
+
+ functions = [ NameNode(None, type=t) for t in function_types ]
+ self.assertMatches(function_types[0], [pt.c_int_type, pt.c_long_type], functions)
+ self.assertMatches(function_types[1], [pt.c_long_type, pt.c_long_type], functions)
+ self.assertMatches(function_types[1], [pt.c_long_type, pt.c_int_type], functions)
+
+ def test_cpp_reference_cpp_class(self):
+ classes = [ cppclasstype("Test%d"%i, []) for i in range(2) ]
+ function_types = [
+ cfunctype(pt.CReferenceType(classes[0])),
+ cfunctype(pt.CReferenceType(classes[1])),
+ ]
+
+ functions = [ NameNode(None, type=t) for t in function_types ]
+ self.assertMatches(function_types[0], [classes[0]], functions)
+ self.assertMatches(function_types[1], [classes[1]], functions)
+
+ def test_cpp_reference_cpp_class_and_int(self):
+ classes = [ cppclasstype("Test%d"%i, []) for i in range(2) ]
+ function_types = [
+ cfunctype(pt.CReferenceType(classes[0]), pt.c_int_type),
+ cfunctype(pt.CReferenceType(classes[0]), pt.c_long_type),
+ cfunctype(pt.CReferenceType(classes[1]), pt.c_int_type),
+ cfunctype(pt.CReferenceType(classes[1]), pt.c_long_type),
+ ]
+
+ functions = [ NameNode(None, type=t) for t in function_types ]
+ self.assertMatches(function_types[0], [classes[0], pt.c_int_type], functions)
+ self.assertMatches(function_types[1], [classes[0], pt.c_long_type], functions)
+ self.assertMatches(function_types[2], [classes[1], pt.c_int_type], functions)
+ self.assertMatches(function_types[3], [classes[1], pt.c_long_type], functions)
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestStringEncoding.py b/contrib/tools/cython/Cython/Compiler/Tests/TestStringEncoding.py
new file mode 100644
index 0000000000..91d099333a
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestStringEncoding.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import unittest
+
+import Cython.Compiler.StringEncoding as StringEncoding
+
+
+class StringEncodingTest(unittest.TestCase):
+ """
+ Test the StringEncoding module.
+ """
+ def test_string_contains_lone_surrogates(self):
+ self.assertFalse(StringEncoding.string_contains_lone_surrogates(u"abc"))
+ self.assertFalse(StringEncoding.string_contains_lone_surrogates(u"\uABCD"))
+ self.assertFalse(StringEncoding.string_contains_lone_surrogates(u"\N{SNOWMAN}"))
+
+ # This behaves differently in Py2 when freshly parsed and read from a .pyc file,
+ # but it seems to be a marshalling bug in Py2, which doesn't hurt us in Cython.
+ if sys.version_info[0] != 2:
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(u"\uD800\uDFFF"))
+
+ # In Py2 with 16bit Unicode, the following is indistinguishable from the 32bit character.
+ obfuscated_surrogate_pair = (u"\uDFFF" + "\uD800")[::-1]
+        if sys.version_info[0] == 2 and sys.maxunicode == 65535:
+ self.assertFalse(StringEncoding.string_contains_lone_surrogates(obfuscated_surrogate_pair))
+ else:
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(obfuscated_surrogate_pair))
+
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(u"\uD800"))
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(u"\uDFFF"))
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(u"\uDFFF\uD800"))
+ self.assertTrue(StringEncoding.string_contains_lone_surrogates(u"\uD800x\uDFFF"))
+
+ def test_string_contains_surrogates(self):
+ self.assertFalse(StringEncoding.string_contains_surrogates(u"abc"))
+ self.assertFalse(StringEncoding.string_contains_surrogates(u"\uABCD"))
+ self.assertFalse(StringEncoding.string_contains_surrogates(u"\N{SNOWMAN}"))
+
+ self.assertTrue(StringEncoding.string_contains_surrogates(u"\uD800"))
+ self.assertTrue(StringEncoding.string_contains_surrogates(u"\uDFFF"))
+ self.assertTrue(StringEncoding.string_contains_surrogates(u"\uD800\uDFFF"))
+ self.assertTrue(StringEncoding.string_contains_surrogates(u"\uDFFF\uD800"))
+ self.assertTrue(StringEncoding.string_contains_surrogates(u"\uD800x\uDFFF"))
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py
new file mode 100644
index 0000000000..9ee8da5478
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py
@@ -0,0 +1,64 @@
+from Cython.TestUtils import CythonTest
+from Cython.Compiler.TreeFragment import *
+from Cython.Compiler.Nodes import *
+from Cython.Compiler.UtilNodes import *
+import Cython.Compiler.Naming as Naming
+
+class TestTreeFragments(CythonTest):
+
+ def test_basic(self):
+ F = self.fragment(u"x = 4")
+ T = F.copy()
+ self.assertCode(u"x = 4", T)
+
+ def test_copy_is_taken(self):
+ F = self.fragment(u"if True: x = 4")
+ T1 = F.root
+ T2 = F.copy()
+ self.assertEqual("x", T2.stats[0].if_clauses[0].body.lhs.name)
+ T2.stats[0].if_clauses[0].body.lhs.name = "other"
+ self.assertEqual("x", T1.stats[0].if_clauses[0].body.lhs.name)
+
+ def test_substitutions_are_copied(self):
+ T = self.fragment(u"y + y").substitute({"y": NameNode(pos=None, name="x")})
+ self.assertEqual("x", T.stats[0].expr.operand1.name)
+ self.assertEqual("x", T.stats[0].expr.operand2.name)
+ self.assertTrue(T.stats[0].expr.operand1 is not T.stats[0].expr.operand2)
+
+ def test_substitution(self):
+ F = self.fragment(u"x = 4")
+ y = NameNode(pos=None, name=u"y")
+ T = F.substitute({"x" : y})
+ self.assertCode(u"y = 4", T)
+
+ def test_exprstat(self):
+ F = self.fragment(u"PASS")
+ pass_stat = PassStatNode(pos=None)
+ T = F.substitute({"PASS" : pass_stat})
+ self.assertTrue(isinstance(T.stats[0], PassStatNode), T)
+
+ def test_pos_is_transferred(self):
+ F = self.fragment(u"""
+ x = y
+ x = u * v ** w
+ """)
+ T = F.substitute({"v" : NameNode(pos=None, name="a")})
+ v = F.root.stats[1].rhs.operand2.operand1
+ a = T.stats[1].rhs.operand2.operand1
+ self.assertEqual(v.pos, a.pos)
+
+ def test_temps(self):
+ TemplateTransform.temp_name_counter = 0
+ F = self.fragment(u"""
+ TMP
+ x = TMP
+ """)
+ T = F.substitute(temps=[u"TMP"])
+ s = T.body.stats
+ self.assertTrue(isinstance(s[0].expr, TempRefNode))
+ self.assertTrue(isinstance(s[1].rhs, TempRefNode))
+ self.assertTrue(s[0].expr.handle is s[1].rhs.handle)
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py
new file mode 100644
index 0000000000..bee53b3d2b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py
@@ -0,0 +1,94 @@
+import unittest
+from Cython.Compiler.Visitor import PrintTree
+from Cython.TestUtils import TransformTest
+from Cython.Compiler.TreePath import find_first, find_all
+from Cython.Compiler import Nodes, ExprNodes
+
+class TestTreePath(TransformTest):
+ _tree = None
+
+ def _build_tree(self):
+ if self._tree is None:
+ self._tree = self.run_pipeline([], u"""
+ def decorator(fun): # DefNode
+ return fun # ReturnStatNode, NameNode
+ @decorator # NameNode
+ def decorated(): # DefNode
+ pass
+ """)
+ return self._tree
+
+ def test_node_path(self):
+ t = self._build_tree()
+ self.assertEqual(2, len(find_all(t, "//DefNode")))
+ self.assertEqual(2, len(find_all(t, "//NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
+
+ def test_node_path_star(self):
+ t = self._build_tree()
+ self.assertEqual(10, len(find_all(t, "//*")))
+ self.assertEqual(8, len(find_all(t, "//DefNode//*")))
+ self.assertEqual(0, len(find_all(t, "//NameNode//*")))
+
+ def test_node_path_attribute(self):
+ t = self._build_tree()
+ self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
+ self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
+
+ def test_node_path_attribute_dotted(self):
+ t = self._build_tree()
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
+ self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
+
+ def test_node_path_child(self):
+ t = self._build_tree()
+ self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
+
+ def test_node_path_node_predicate(self):
+ t = self._build_tree()
+ self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//ReturnStatNode[./NameNode]")))
+
+ def test_node_path_node_predicate_step(self):
+ t = self._build_tree()
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+
+ def test_node_path_attribute_exists(self):
+ t = self._build_tree()
+ self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
+ self.assertEqual(ExprNodes.NameNode,
+ type(find_first(t, "//NameNode[@name]")))
+
+ def test_node_path_attribute_exists_not(self):
+ t = self._build_tree()
+ self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
+
+ def test_node_path_and(self):
+ t = self._build_tree()
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
+
+ def test_node_path_attribute_string_predicate(self):
+ t = self._build_tree()
+ self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
+
+ def test_node_path_recursive_predicate(self):
+ t = self._build_tree()
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
+
+if __name__ == '__main__':
+ unittest.main()
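The path language tested above mirrors a small XPath subset. A minimal usage sketch, assuming a parse-only tree like the one run_pipeline([], ...) builds in these tests:

    from Cython.Compiler.TreeFragment import TreeFragment
    from Cython.Compiler.TreePath import find_all, find_first

    tree = TreeFragment(u"def f():\n    return x").root
    find_all(tree, "//NameNode/@name")          # -> ['x']
    find_first(tree, "//DefNode[.//NameNode]")  # -> the DefNode for f, or None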
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py
new file mode 100644
index 0000000000..f2f6f3773b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import
+
+import unittest
+
+import Cython.Compiler.PyrexTypes as PT
+
+
+class TestMethodDispatcherTransform(unittest.TestCase):
+
+ def test_widest_numeric_type(self):
+ def assert_widest(type1, type2, widest):
+ self.assertEqual(widest, PT.widest_numeric_type(type1, type2))
+
+ assert_widest(PT.c_int_type, PT.c_long_type, PT.c_long_type)
+ assert_widest(PT.c_double_type, PT.c_long_type, PT.c_double_type)
+ assert_widest(PT.c_longdouble_type, PT.c_long_type, PT.c_longdouble_type)
+
+ cenum = PT.CEnumType("E", "cenum", typedef_flag=False)
+ assert_widest(PT.c_int_type, cenum, PT.c_int_type)
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py b/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py
new file mode 100644
index 0000000000..3d1906ca0b
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py
@@ -0,0 +1,101 @@
+import unittest
+
+from Cython.Compiler import Code, UtilityCode
+
+
+def strip_2tup(tup):
+ return tup[0] and tup[0].strip(), tup[1] and tup[1].strip()
+
+class TestUtilityLoader(unittest.TestCase):
+ """
+ Test loading UtilityCodes
+ """
+
+ expected = "test {{loader}} prototype", "test {{loader}} impl"
+
+ required = "req {{loader}} proto", "req {{loader}} impl"
+
+ context = dict(loader='Loader')
+
+ name = "TestUtilityLoader"
+ filename = "TestUtilityLoader.c"
+ cls = Code.UtilityCode
+
+ def test_load_as_string(self):
+ got = strip_2tup(self.cls.load_as_string(self.name))
+ self.assertEqual(got, self.expected)
+
+ got = strip_2tup(self.cls.load_as_string(self.name, self.filename))
+ self.assertEqual(got, self.expected)
+
+ def test_load(self):
+ utility = self.cls.load(self.name)
+ got = strip_2tup((utility.proto, utility.impl))
+ self.assertEqual(got, self.expected)
+
+ required, = utility.requires
+ got = strip_2tup((required.proto, required.impl))
+ self.assertEqual(got, self.required)
+
+ utility = self.cls.load(self.name, from_file=self.filename)
+ got = strip_2tup((utility.proto, utility.impl))
+ self.assertEqual(got, self.expected)
+
+ utility = self.cls.load_cached(self.name, from_file=self.filename)
+ got = strip_2tup((utility.proto, utility.impl))
+ self.assertEqual(got, self.expected)
+
+
+class TestTempitaUtilityLoader(TestUtilityLoader):
+ """
+ Test loading UtilityCodes with Tempita substitution
+ """
+ expected_tempita = (TestUtilityLoader.expected[0].replace('{{loader}}', 'Loader'),
+ TestUtilityLoader.expected[1].replace('{{loader}}', 'Loader'))
+
+ required_tempita = (TestUtilityLoader.required[0].replace('{{loader}}', 'Loader'),
+ TestUtilityLoader.required[1].replace('{{loader}}', 'Loader'))
+
+ cls = Code.TempitaUtilityCode
+
+ def test_load_as_string(self):
+ got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
+ self.assertEqual(got, self.expected_tempita)
+
+ def test_load(self):
+ utility = self.cls.load(self.name, context=self.context)
+ got = strip_2tup((utility.proto, utility.impl))
+ self.assertEqual(got, self.expected_tempita)
+
+ required, = utility.requires
+ got = strip_2tup((required.proto, required.impl))
+ self.assertEqual(got, self.required_tempita)
+
+ utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
+ got = strip_2tup((utility.proto, utility.impl))
+ self.assertEqual(got, self.expected_tempita)
+
+
+class TestCythonUtilityLoader(TestTempitaUtilityLoader):
+ """
+ Test loading CythonUtilityCodes
+ """
+
+ # Just change the attributes and run the same tests
+ expected = None, "test {{cy_loader}} impl"
+ expected_tempita = None, "test CyLoader impl"
+
+ required = None, "req {{cy_loader}} impl"
+ required_tempita = None, "req CyLoader impl"
+
+ context = dict(cy_loader='CyLoader')
+
+ name = "TestCyUtilityLoader"
+ filename = "TestCyUtilityLoader.pyx"
+ cls = UtilityCode.CythonUtilityCode
+
+ # Small hack to pass our tests above
+ cls.proto = None
+
+ test_load = TestUtilityLoader.test_load
+ test_load_tempita = TestTempitaUtilityLoader.test_load
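These tests rely on two fixture files (TestUtilityLoader.c, TestCyUtilityLoader.pyx) shipped under Cython/Utility; utilities are looked up there by section name. A minimal sketch of the loader API, assuming those fixtures are present:

    from Cython.Compiler import Code

    # raw (proto, impl) strings for the named utility section
    proto, impl = Code.UtilityCode.load_as_string("TestUtilityLoader", "TestUtilityLoader.c")

    # load()/load_cached() return UtilityCode objects carrying .proto/.impl
    # plus any transitively required utilities in .requires
    utility = Code.UtilityCode.load_cached("TestUtilityLoader", from_file="TestUtilityLoader.c")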
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestVisitor.py b/contrib/tools/cython/Cython/Compiler/Tests/TestVisitor.py
new file mode 100644
index 0000000000..dbc8e0c03a
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestVisitor.py
@@ -0,0 +1,61 @@
+from Cython.Compiler.ModuleNode import ModuleNode
+from Cython.Compiler.Symtab import ModuleScope
+from Cython.TestUtils import TransformTest
+from Cython.Compiler.Visitor import MethodDispatcherTransform
+from Cython.Compiler.ParseTreeTransforms import (
+ NormalizeTree, AnalyseDeclarationsTransform,
+ AnalyseExpressionsTransform, InterpretCompilerDirectives)
+
+
+class TestMethodDispatcherTransform(TransformTest):
+ _tree = None
+
+ def _build_tree(self):
+ if self._tree is None:
+ context = None
+
+ def fake_module(node):
+ scope = ModuleScope('test', None, None)
+ return ModuleNode(node.pos, doc=None, body=node,
+ scope=scope, full_module_name='test',
+ directive_comments={})
+ pipeline = [
+ fake_module,
+ NormalizeTree(context),
+ InterpretCompilerDirectives(context, {}),
+ AnalyseDeclarationsTransform(context),
+ AnalyseExpressionsTransform(context),
+ ]
+ self._tree = self.run_pipeline(pipeline, u"""
+ cdef bytes s = b'asdfg'
+ cdef dict d = {1:2}
+ x = s * 3
+ d.get('test')
+ """)
+ return self._tree
+
+ def test_builtin_method(self):
+ calls = [0]
+ class Test(MethodDispatcherTransform):
+ def _handle_simple_method_dict_get(self, node, func, args, unbound):
+ calls[0] += 1
+ return node
+
+ tree = self._build_tree()
+ Test(None)(tree)
+ self.assertEqual(1, calls[0])
+
+ def test_binop_method(self):
+ calls = {'bytes': 0, 'object': 0}
+ class Test(MethodDispatcherTransform):
+ def _handle_simple_method_bytes___mul__(self, node, func, args, unbound):
+ calls['bytes'] += 1
+ return node
+ def _handle_simple_method_object___mul__(self, node, func, args, unbound):
+ calls['object'] += 1
+ return node
+
+ tree = self._build_tree()
+ Test(None)(tree)
+ self.assertEqual(1, calls['bytes'])
+ self.assertEqual(0, calls['object'])
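The mechanism under test: MethodDispatcherTransform resolves a call on a known builtin type to a handler method named after the type and method, if the subclass defines one. A minimal sketch of such a subclass (the counter is illustrative):

    from Cython.Compiler.Visitor import MethodDispatcherTransform

    class CountDictGet(MethodDispatcherTransform):
        count = 0
        def _handle_simple_method_dict_get(self, node, func, args, unbound):
            # called for every d.get(...) where d is known to be a dict
            CountDictGet.count += 1
            return node   # return the node unchanged to leave the tree intact

As in the tests, the tree must first pass through the analysis transforms so that expression types are known.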
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/__init__.py b/contrib/tools/cython/Cython/Compiler/Tests/__init__.py
new file mode 100644
index 0000000000..fa81adaff6
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Tests/__init__.py
@@ -0,0 +1 @@
+# empty file
diff --git a/contrib/tools/cython/Cython/Compiler/TreeFragment.py b/contrib/tools/cython/Cython/Compiler/TreeFragment.py
new file mode 100644
index 0000000000..b85da8191a
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/TreeFragment.py
@@ -0,0 +1,275 @@
+#
+# TreeFragments - parsing of strings to trees
+#
+
+"""
+Support for parsing strings into code trees.
+"""
+
+from __future__ import absolute_import
+
+import re
+from io import StringIO
+
+from .Scanning import PyrexScanner, StringSourceDescriptor
+from .Symtab import ModuleScope
+from . import PyrexTypes
+from .Visitor import VisitorTransform
+from .Nodes import Node, StatListNode
+from .ExprNodes import NameNode
+from .StringEncoding import _unicode
+from . import Parsing
+from . import Main
+from . import UtilNodes
+
+
+class StringParseContext(Main.Context):
+ def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
+ if include_directories is None:
+ include_directories = []
+ if compiler_directives is None:
+ compiler_directives = {}
+ # TODO: see if "language_level=3" also works for our internal code here.
+ Main.Context.__init__(self, include_directories, compiler_directives, cpp=cpp, language_level=2)
+ self.module_name = name
+
+ def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True):
+ if module_name not in (self.module_name, 'cython'):
+ raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
+ return ModuleScope(module_name, parent_module=None, context=self)
+
+
+def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None,
+ context=None, allow_struct_enum_decorator=False):
+ """
+ Utility method to parse a (unicode) string of code. This is mostly
+ used for internal Cython compiler purposes (creating code snippets
+ that transforms should emit, as well as unit testing).
+
+ code - a unicode string containing Cython (module-level) code
+ name - a descriptive name for the code source (to use in error messages etc.)
+
+ RETURNS
+
+ The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is
+ set to the scope used when parsing.
+ """
+ if context is None:
+ context = StringParseContext(name)
+ # Since source files carry an encoding, it makes sense in this context
+ # to use a unicode string so that code fragments don't have to bother
+ # with encoding. This means that test code passed in should not have an
+ # encoding header.
+ assert isinstance(code, _unicode), "unicode code snippets only please"
+ encoding = "UTF-8"
+
+ module_name = name
+ if initial_pos is None:
+ initial_pos = (name, 1, 0)
+ code_source = StringSourceDescriptor(name, code)
+
+ scope = context.find_module(module_name, pos=initial_pos, need_pxd=False)
+
+ buf = StringIO(code)
+
+ scanner = PyrexScanner(buf, code_source, source_encoding = encoding,
+ scope = scope, context = context, initial_pos = initial_pos)
+ ctx = Parsing.Ctx(allow_struct_enum_decorator=allow_struct_enum_decorator)
+
+ if level is None:
+ tree = Parsing.p_module(scanner, 0, module_name, ctx=ctx)
+ tree.scope = scope
+ tree.is_pxd = False
+ else:
+ tree = Parsing.p_code(scanner, level=level, ctx=ctx)
+
+ tree.scope = scope
+ return tree
+
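A minimal usage sketch for parse_from_strings (module level, unicode input):

    from Cython.Compiler.TreeFragment import parse_from_strings

    tree = parse_from_strings("snippet", u"x = 1")
    # 'tree' is a ModuleNode; tree.body holds the parsed statements and
    # tree.scope the ModuleScope created by StringParseContext.find_module()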
+
+class TreeCopier(VisitorTransform):
+ def visit_Node(self, node):
+ if node is None:
+ return node
+ else:
+ c = node.clone_node()
+ self.visitchildren(c)
+ return c
+
+
+class ApplyPositionAndCopy(TreeCopier):
+ def __init__(self, pos):
+ super(ApplyPositionAndCopy, self).__init__()
+ self.pos = pos
+
+ def visit_Node(self, node):
+ copy = super(ApplyPositionAndCopy, self).visit_Node(node)
+ copy.pos = self.pos
+ return copy
+
+
+class TemplateTransform(VisitorTransform):
+ """
+ Makes a copy of a template tree while doing substitutions.
+
+ A dictionary "substitutions" should be passed in when calling
+ the transform; mapping names to replacement nodes. Then replacement
+ happens like this:
+ - If an ExprStatNode contains a single NameNode, whose name is
+ a key in the substitutions dictionary, the ExprStatNode is
+ replaced with a copy of the tree given in the dictionary.
+ It is the responsibility of the caller that the replacement
+ node is a valid statement.
+ - If a single NameNode is otherwise encountered, it is replaced
+ if its name is listed in the substitutions dictionary in the
+ same way. It is the responsibility of the caller to make sure
+ that the replacement nodes is a valid expression.
+
+ Also a list "temps" should be passed. Any names listed will
+ be transformed into anonymous, temporary names.
+
+ Temp-name substitution is currently supported for:
+ NameNode
+ (various function and class definition nodes etc. should be added to this)
+
+ Each replacement node gets the position of the substituted node
+ recursively applied to every member node.
+ """
+
+ temp_name_counter = 0
+
+ def __call__(self, node, substitutions, temps, pos):
+ self.substitutions = substitutions
+ self.pos = pos
+ tempmap = {}
+ temphandles = []
+ for temp in temps:
+ TemplateTransform.temp_name_counter += 1
+ handle = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ tempmap[temp] = handle
+ temphandles.append(handle)
+ self.tempmap = tempmap
+ result = super(TemplateTransform, self).__call__(node)
+ if temps:
+ result = UtilNodes.TempsBlockNode(self.get_pos(node),
+ temps=temphandles,
+ body=result)
+ return result
+
+ def get_pos(self, node):
+ if self.pos:
+ return self.pos
+ else:
+ return node.pos
+
+ def visit_Node(self, node):
+ if node is None:
+ return None
+ else:
+ c = node.clone_node()
+ if self.pos is not None:
+ c.pos = self.pos
+ self.visitchildren(c)
+ return c
+
+ def try_substitution(self, node, key):
+ sub = self.substitutions.get(key)
+ if sub is not None:
+ pos = self.pos
+ if pos is None: pos = node.pos
+ return ApplyPositionAndCopy(pos)(sub)
+ else:
+ return self.visit_Node(node) # make copy as usual
+
+ def visit_NameNode(self, node):
+ temphandle = self.tempmap.get(node.name)
+ if temphandle:
+ # Replace name with temporary
+ return temphandle.ref(self.get_pos(node))
+ else:
+ return self.try_substitution(node, node.name)
+
+ def visit_ExprStatNode(self, node):
+ # If an expression-as-statement consists of only a replaceable
+ # NameNode, we replace the entire statement, not only the NameNode
+ if isinstance(node.expr, NameNode):
+ return self.try_substitution(node, node.expr.name)
+ else:
+ return self.visit_Node(node)
+
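A short sketch of the temps mechanism described in the docstring above (mirroring test_temps): every occurrence of a listed name becomes a TempRefNode sharing one handle, and the result is wrapped in a TempsBlockNode so the temporary is allocated around the statements:

    from Cython.Compiler.TreeFragment import TreeFragment

    fragment = TreeFragment(u"TMP\nx = TMP", temps=[u"TMP"])
    block = fragment.substitute()   # -> TempsBlockNode
    stats = block.body.stats        # both TMP references share one temp handle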
+
+def copy_code_tree(node):
+ return TreeCopier()(node)
+
+
+_match_indent = re.compile(u"^ *").match
+
+
+def strip_common_indent(lines):
+ """Strips empty lines and common indentation from the list of strings given in lines"""
+ # TODO: consider textwrap.dedent instead (note: it keeps blank lines)
+ lines = [x for x in lines if x.strip() != u""]
+ if lines:
+ minindent = min([len(_match_indent(x).group(0)) for x in lines])
+ lines = [x[minindent:] for x in lines]
+ return lines
+
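A quick illustration of the helper:

    strip_common_indent([u"    if x:", u"        y = 1", u""])
    # -> [u"if x:", u"    y = 1"]  (blank line dropped, common 4-space indent removed)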
+
+class TreeFragment(object):
+ def __init__(self, code, name=None, pxds=None, temps=None, pipeline=None, level=None, initial_pos=None):
+ if pxds is None:
+ pxds = {}
+ if temps is None:
+ temps = []
+ if pipeline is None:
+ pipeline = []
+ if not name:
+ name = "(tree fragment)"
+
+ if isinstance(code, _unicode):
+ def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
+
+ fmt_code = fmt(code)
+ fmt_pxds = {}
+ for key, value in pxds.items():
+ fmt_pxds[key] = fmt(value)
+ mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos)
+ if level is None:
+ t = t.body # Make sure a StatListNode is at the top
+ if not isinstance(t, StatListNode):
+ t = StatListNode(pos=mod.pos, stats=[t])
+ for transform in pipeline:
+ if transform is None:
+ continue
+ t = transform(t)
+ self.root = t
+ elif isinstance(code, Node):
+ if pxds:
+ raise NotImplementedError()
+ self.root = code
+ else:
+ raise ValueError("Unrecognized code format (accepts unicode and Node)")
+ self.temps = temps
+
+ def copy(self):
+ return copy_code_tree(self.root)
+
+ def substitute(self, nodes=None, temps=None, pos = None):
+ if nodes is None:
+ nodes = {}
+ if temps is None:
+ temps = []
+ return TemplateTransform()(self.root,
+ substitutions = nodes,
+ temps = self.temps + temps, pos = pos)
+
+
+class SetPosTransform(VisitorTransform):
+ def __init__(self, pos):
+ super(SetPosTransform, self).__init__()
+ self.pos = pos
+
+ def visit_Node(self, node):
+ node.pos = self.pos
+ self.visitchildren(node)
+ return node
diff --git a/contrib/tools/cython/Cython/Compiler/TreePath.py b/contrib/tools/cython/Cython/Compiler/TreePath.py
new file mode 100644
index 0000000000..8585905557
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/TreePath.py
@@ -0,0 +1,296 @@
+"""
+A simple XPath-like language for tree traversal.
+
+This works by creating a filter chain of generator functions. Each
+function selects a part of the expression, e.g. a child node, a
+specific descendant or a node that holds an attribute.
+"""
+
+from __future__ import absolute_import
+
+import re
+import operator
+import sys
+
+if sys.version_info[0] >= 3:
+ _unicode = str
+else:
+ _unicode = unicode
+
+path_tokenizer = re.compile(
+ r"("
+ r"'[^']*'|\"[^\"]*\"|"
+ r"//?|"
+ r"\(\)|"
+ r"==?|"
+ r"[/.*\[\]()@])|"
+ r"([^/\[\]()@=\s]+)|"
+ r"\s+"
+ ).findall
+
+def iterchildren(node, attr_name):
+ # returns an iterable over the child node(s) stored in the given attribute
+ child = getattr(node, attr_name)
+ if child is not None:
+ if type(child) is list:
+ return child
+ else:
+ return [child]
+ else:
+ return ()
+
+def _get_first_or_none(it):
+ try:
+ try:
+ _next = it.next
+ except AttributeError:
+ return next(it)
+ else:
+ return _next()
+ except StopIteration:
+ return None
+
+def type_name(node):
+ return node.__class__.__name__.split('.')[-1]
+
+def parse_func(next, token):
+ name = token[1]
+ token = next()
+ if token[0] != '(':
+ raise ValueError("Expected '(' after function name '%s'" % name)
+ predicate = handle_predicate(next, token)
+ return name, predicate
+
+def handle_func_not(next, token):
+ """
+ not(...)
+ """
+ name, predicate = parse_func(next, token)
+
+ def select(result):
+ for node in result:
+ if _get_first_or_none(predicate([node])) is None:
+ yield node
+ return select
+
+def handle_name(next, token):
+ """
+ /NodeName/
+ or
+ func(...)
+ """
+ name = token[1]
+ if name in functions:
+ return functions[name](next, token)
+ def select(result):
+ for node in result:
+ for attr_name in node.child_attrs:
+ for child in iterchildren(node, attr_name):
+ if type_name(child) == name:
+ yield child
+ return select
+
+def handle_star(next, token):
+ """
+ /*/
+ """
+ def select(result):
+ for node in result:
+ for name in node.child_attrs:
+ for child in iterchildren(node, name):
+ yield child
+ return select
+
+def handle_dot(next, token):
+ """
+ /./
+ """
+ def select(result):
+ return result
+ return select
+
+def handle_descendants(next, token):
+ """
+ //...
+ """
+ token = next()
+ if token[0] == "*":
+ def iter_recursive(node):
+ for name in node.child_attrs:
+ for child in iterchildren(node, name):
+ yield child
+ for c in iter_recursive(child):
+ yield c
+ elif not token[0]:
+ node_name = token[1]
+ def iter_recursive(node):
+ for name in node.child_attrs:
+ for child in iterchildren(node, name):
+ if type_name(child) == node_name:
+ yield child
+ for c in iter_recursive(child):
+ yield c
+ else:
+ raise ValueError("Expected node name after '//'")
+
+ def select(result):
+ for node in result:
+ for child in iter_recursive(node):
+ yield child
+
+ return select
+
+
+def handle_attribute(next, token):
+ token = next()
+ if token[0]:
+ raise ValueError("Expected attribute name")
+ name = token[1]
+ value = None
+ try:
+ token = next()
+ except StopIteration:
+ pass
+ else:
+ if token[0] == '=':
+ value = parse_path_value(next)
+ readattr = operator.attrgetter(name)
+ if value is None:
+ def select(result):
+ for node in result:
+ try:
+ attr_value = readattr(node)
+ except AttributeError:
+ continue
+ if attr_value is not None:
+ yield attr_value
+ else:
+ def select(result):
+ for node in result:
+ try:
+ attr_value = readattr(node)
+ except AttributeError:
+ continue
+ if attr_value == value:
+ yield attr_value
+ elif (isinstance(attr_value, bytes) and isinstance(value, _unicode) and
+ attr_value == value.encode()):
+ # allow a bytes-to-string comparison too
+ yield attr_value
+
+ return select
+
+
+def parse_path_value(next):
+ token = next()
+ value = token[0]
+ if value:
+ if value[:1] == "'" or value[:1] == '"':
+ return value[1:-1]
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ elif token[1].isdigit():
+ return int(token[1])
+ else:
+ name = token[1].lower()
+ if name == 'true':
+ return True
+ elif name == 'false':
+ return False
+ raise ValueError("Invalid attribute predicate: '%s'" % value)
+
+def handle_predicate(next, token):
+ token = next()
+ selector = []
+ while token[0] != ']':
+ selector.append( operations[token[0]](next, token) )
+ try:
+ token = next()
+ except StopIteration:
+ break
+ else:
+ if token[0] == "/":
+ token = next()
+
+ if not token[0] and token[1] == 'and':
+ return logical_and(selector, handle_predicate(next, token))
+
+ def select(result):
+ for node in result:
+ subresult = iter((node,))
+ for select in selector:
+ subresult = select(subresult)
+ predicate_result = _get_first_or_none(subresult)
+ if predicate_result is not None:
+ yield node
+ return select
+
+def logical_and(lhs_selects, rhs_select):
+ def select(result):
+ for node in result:
+ subresult = iter((node,))
+ for select in lhs_selects:
+ subresult = select(subresult)
+ predicate_result = _get_first_or_none(subresult)
+ subresult = iter((node,))
+ if predicate_result is not None:
+ for result_node in rhs_select(subresult):
+ yield node
+ return select
+
+
+operations = {
+ "@": handle_attribute,
+ "": handle_name,
+ "*": handle_star,
+ ".": handle_dot,
+ "//": handle_descendants,
+ "[": handle_predicate,
+ }
+
+functions = {
+ 'not' : handle_func_not
+ }
+
+def _build_path_iterator(path):
+ # parse pattern
+ stream = iter([ (special,text)
+ for (special,text) in path_tokenizer(path)
+ if special or text ])
+ try:
+ _next = stream.next
+ except AttributeError:
+ # Python 3
+ def _next():
+ return next(stream)
+ token = _next()
+ selector = []
+ while 1:
+ try:
+ selector.append(operations[token[0]](_next, token))
+ except StopIteration:
+ raise ValueError("invalid path")
+ try:
+ token = _next()
+ if token[0] == "/":
+ token = _next()
+ except StopIteration:
+ break
+ return selector
+
+# main module API
+
+def iterfind(node, path):
+ selector_chain = _build_path_iterator(path)
+ result = iter((node,))
+ for select in selector_chain:
+ result = select(result)
+ return result
+
+def find_first(node, path):
+ return _get_first_or_none(iterfind(node, path))
+
+def find_all(node, path):
+ return list(iterfind(node, path))
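Evaluation is lazy end to end: iterfind() chains the selected generators and yields matches on demand, which is what find_first() exploits. A small sketch, assuming a TreeFragment-built tree as in the test module above:

    from Cython.Compiler.TreeFragment import TreeFragment
    from Cython.Compiler.TreePath import iterfind

    tree = TreeFragment(u"a = b").root
    matches = iterfind(tree, "//NameNode[@name = 'b']")
    node = next(iter(matches), None)   # first match (or None); nothing further is evaluated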
diff --git a/contrib/tools/cython/Cython/Compiler/TypeInference.py b/contrib/tools/cython/Cython/Compiler/TypeInference.py
new file mode 100644
index 0000000000..c7ffee7d24
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/TypeInference.py
@@ -0,0 +1,591 @@
+from __future__ import absolute_import
+
+from .Errors import error, message
+from . import ExprNodes
+from . import Nodes
+from . import Builtin
+from . import PyrexTypes
+from .. import Utils
+from .PyrexTypes import py_object_type, unspecified_type
+from .Visitor import CythonTransform, EnvTransform
+
+try:
+ reduce
+except NameError:
+ from functools import reduce
+
+
+class TypedExprNode(ExprNodes.ExprNode):
+ # Used for declaring assignments of a specified type without a known entry.
+ subexprs = []
+
+ def __init__(self, type, pos=None):
+ super(TypedExprNode, self).__init__(pos, type=type)
+
+object_expr = TypedExprNode(py_object_type)
+
+
+class MarkParallelAssignments(EnvTransform):
+ # Collects assignments inside parallel blocks (prange, with parallel).
+ # Perhaps it's better to move it to ControlFlowAnalysis.
+
+ # tells us whether we're in a normal loop
+ in_loop = False
+
+ parallel_errors = False
+
+ def __init__(self, context):
+ # Track the parallel block scopes (with parallel, for i in prange())
+ self.parallel_block_stack = []
+ super(MarkParallelAssignments, self).__init__(context)
+
+ def mark_assignment(self, lhs, rhs, inplace_op=None):
+ if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
+ if lhs.entry is None:
+ # TODO: This shouldn't happen...
+ return
+
+ if self.parallel_block_stack:
+ parallel_node = self.parallel_block_stack[-1]
+ previous_assignment = parallel_node.assignments.get(lhs.entry)
+
+ # If there was a previous assignment to the variable, keep the
+ # previous assignment position
+ if previous_assignment:
+ pos, previous_inplace_op = previous_assignment
+
+ if (inplace_op and previous_inplace_op and
+ inplace_op != previous_inplace_op):
+ # x += y; x *= y
+ t = (inplace_op, previous_inplace_op)
+ error(lhs.pos,
+ "Reduction operator '%s' is inconsistent "
+ "with previous reduction operator '%s'" % t)
+ else:
+ pos = lhs.pos
+
+ parallel_node.assignments[lhs.entry] = (pos, inplace_op)
+ parallel_node.assigned_nodes.append(lhs)
+
+ elif isinstance(lhs, ExprNodes.SequenceNode):
+ for i, arg in enumerate(lhs.args):
+ if not rhs or arg.is_starred:
+ item_node = None
+ else:
+ item_node = rhs.inferable_item_node(i)
+ self.mark_assignment(arg, item_node)
+ else:
+ # Could use this info to infer cdef class attributes...
+ pass
+
+ def visit_WithTargetAssignmentStatNode(self, node):
+ self.mark_assignment(node.lhs, node.with_node.enter_call)
+ self.visitchildren(node)
+ return node
+
+ def visit_SingleAssignmentNode(self, node):
+ self.mark_assignment(node.lhs, node.rhs)
+ self.visitchildren(node)
+ return node
+
+ def visit_CascadedAssignmentNode(self, node):
+ for lhs in node.lhs_list:
+ self.mark_assignment(lhs, node.rhs)
+ self.visitchildren(node)
+ return node
+
+ def visit_InPlaceAssignmentNode(self, node):
+ self.mark_assignment(node.lhs, node.create_binop_node(), node.operator)
+ self.visitchildren(node)
+ return node
+
+ def visit_ForInStatNode(self, node):
+ # TODO: Remove redundancy with range optimization...
+ is_special = False
+ sequence = node.iterator.sequence
+ target = node.target
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = self.current_env().lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name == 'reversed' and len(sequence.args) == 1:
+ sequence = sequence.args[0]
+ elif function.name == 'enumerate' and len(sequence.args) == 1:
+ if target.is_sequence_constructor and len(target.args) == 2:
+ iterator = sequence.args[0]
+ if iterator.is_name:
+ iterator_type = iterator.infer_type(self.current_env())
+ if iterator_type.is_builtin_type:
+ # assume that builtin types have a length within Py_ssize_t
+ self.mark_assignment(
+ target.args[0],
+ ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type))
+ target = target.args[1]
+ sequence = sequence.args[0]
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = self.current_env().lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name in ('range', 'xrange'):
+ is_special = True
+ for arg in sequence.args[:2]:
+ self.mark_assignment(target, arg)
+ if len(sequence.args) > 2:
+ self.mark_assignment(
+ target,
+ ExprNodes.binop_node(node.pos,
+ '+',
+ sequence.args[0],
+ sequence.args[2]))
+
+ if not is_special:
+ # A for-loop basically translates to subsequent calls to
+ # __getitem__(), so using an IndexNode here allows us to
+ # naturally infer the base type of pointers, C arrays,
+ # Python strings, etc., while correctly falling back to an
+ # object type when the base type cannot be handled.
+ self.mark_assignment(target, ExprNodes.IndexNode(
+ node.pos,
+ base=sequence,
+ index=ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type)))
+
+ self.visitchildren(node)
+ return node
+
+ def visit_ForFromStatNode(self, node):
+ self.mark_assignment(node.target, node.bound1)
+ if node.step is not None:
+ self.mark_assignment(node.target,
+ ExprNodes.binop_node(node.pos,
+ '+',
+ node.bound1,
+ node.step))
+ self.visitchildren(node)
+ return node
+
+ def visit_WhileStatNode(self, node):
+ self.visitchildren(node)
+ return node
+
+ def visit_ExceptClauseNode(self, node):
+ if node.target is not None:
+ self.mark_assignment(node.target, object_expr)
+ self.visitchildren(node)
+ return node
+
+ def visit_FromCImportStatNode(self, node):
+ pass # Can't be assigned to...
+
+ def visit_FromImportStatNode(self, node):
+ for name, target in node.items:
+ if name != "*":
+ self.mark_assignment(target, object_expr)
+ self.visitchildren(node)
+ return node
+
+ def visit_DefNode(self, node):
+ # use fake expressions with the right result type
+ if node.star_arg:
+ self.mark_assignment(
+ node.star_arg, TypedExprNode(Builtin.tuple_type, node.pos))
+ if node.starstar_arg:
+ self.mark_assignment(
+ node.starstar_arg, TypedExprNode(Builtin.dict_type, node.pos))
+ EnvTransform.visit_FuncDefNode(self, node)
+ return node
+
+ def visit_DelStatNode(self, node):
+ for arg in node.args:
+ self.mark_assignment(arg, arg)
+ self.visitchildren(node)
+ return node
+
+ def visit_ParallelStatNode(self, node):
+ if self.parallel_block_stack:
+ node.parent = self.parallel_block_stack[-1]
+ else:
+ node.parent = None
+
+ nested = False
+ if node.is_prange:
+ if not node.parent:
+ node.is_parallel = True
+ else:
+ node.is_parallel = (node.parent.is_prange or not
+ node.parent.is_parallel)
+ nested = node.parent.is_prange
+ else:
+ node.is_parallel = True
+ # Note: nested with parallel() blocks are handled by
+ # ParallelRangeTransform!
+ # nested = node.parent
+ nested = node.parent and node.parent.is_prange
+
+ self.parallel_block_stack.append(node)
+
+ nested = nested or len(self.parallel_block_stack) > 2
+ if not self.parallel_errors and nested and not node.is_prange:
+ error(node.pos, "Only prange() may be nested")
+ self.parallel_errors = True
+
+ if node.is_prange:
+ child_attrs = node.child_attrs
+ node.child_attrs = ['body', 'target', 'args']
+ self.visitchildren(node)
+ node.child_attrs = child_attrs
+
+ self.parallel_block_stack.pop()
+ if node.else_clause:
+ node.else_clause = self.visit(node.else_clause)
+ else:
+ self.visitchildren(node)
+ self.parallel_block_stack.pop()
+
+ self.parallel_errors = False
+ return node
+
+ def visit_YieldExprNode(self, node):
+ if self.parallel_block_stack:
+ error(node.pos, "'%s' not allowed in parallel sections" % node.expr_keyword)
+ return node
+
+ def visit_ReturnStatNode(self, node):
+ node.in_parallel = bool(self.parallel_block_stack)
+ return node
+
+
+class MarkOverflowingArithmetic(CythonTransform):
+
+ # It may be possible to integrate this with the above for
+ # performance improvements (though likely not worth it).
+
+ might_overflow = False
+
+ def __call__(self, root):
+ self.env_stack = []
+ self.env = root.scope
+ return super(MarkOverflowingArithmetic, self).__call__(root)
+
+ def visit_safe_node(self, node):
+ self.might_overflow, saved = False, self.might_overflow
+ self.visitchildren(node)
+ self.might_overflow = saved
+ return node
+
+ def visit_neutral_node(self, node):
+ self.visitchildren(node)
+ return node
+
+ def visit_dangerous_node(self, node):
+ self.might_overflow, saved = True, self.might_overflow
+ self.visitchildren(node)
+ self.might_overflow = saved
+ return node
+
+ def visit_FuncDefNode(self, node):
+ self.env_stack.append(self.env)
+ self.env = node.local_scope
+ self.visit_safe_node(node)
+ self.env = self.env_stack.pop()
+ return node
+
+ def visit_NameNode(self, node):
+ if self.might_overflow:
+ entry = node.entry or self.env.lookup(node.name)
+ if entry:
+ entry.might_overflow = True
+ return node
+
+ def visit_BinopNode(self, node):
+ if node.operator in '&|^':
+ return self.visit_neutral_node(node)
+ else:
+ return self.visit_dangerous_node(node)
+
+ def visit_SimpleCallNode(self, node):
+ if node.function.is_name and node.function.name == 'abs':
+ # Overflows for minimum value of fixed size ints.
+ return self.visit_dangerous_node(node)
+ else:
+ return self.visit_neutral_node(node)
+
+ visit_UnopNode = visit_neutral_node
+
+ visit_UnaryMinusNode = visit_dangerous_node
+
+ visit_InPlaceAssignmentNode = visit_dangerous_node
+
+ visit_Node = visit_safe_node
+
+ def visit_assignment(self, lhs, rhs):
+ if (isinstance(rhs, ExprNodes.IntNode)
+ and isinstance(lhs, ExprNodes.NameNode)
+ and Utils.long_literal(rhs.value)):
+ entry = lhs.entry or self.env.lookup(lhs.name)
+ if entry:
+ entry.might_overflow = True
+
+ def visit_SingleAssignmentNode(self, node):
+ self.visit_assignment(node.lhs, node.rhs)
+ self.visitchildren(node)
+ return node
+
+ def visit_CascadedAssignmentNode(self, node):
+ for lhs in node.lhs_list:
+ self.visit_assignment(lhs, node.rhs)
+ self.visitchildren(node)
+ return node
+
+class PyObjectTypeInferer(object):
+ """
+ If it's not declared, it's a PyObject.
+ """
+ def infer_types(self, scope):
+ """
+ Given a dict of entries, map all unspecified types to a specified type.
+ """
+ for name, entry in scope.entries.items():
+ if entry.type is unspecified_type:
+ entry.type = py_object_type
+
+class SimpleAssignmentTypeInferer(object):
+ """
+ Very basic type inference.
+
+ Note: in order to support cross-closure type inference, this must be
+ applied to nested scopes in top-down order.
+ """
+ def set_entry_type(self, entry, entry_type):
+ entry.type = entry_type
+ for e in entry.all_entries():
+ e.type = entry_type
+
+ def infer_types(self, scope):
+ enabled = scope.directives['infer_types']
+ verbose = scope.directives['infer_types.verbose']
+
+ if enabled == True:
+ spanning_type = aggressive_spanning_type
+ elif enabled is None: # safe mode
+ spanning_type = safe_spanning_type
+ else:
+ for entry in scope.entries.values():
+ if entry.type is unspecified_type:
+ self.set_entry_type(entry, py_object_type)
+ return
+
+ # Set of assignments
+ assignments = set()
+ assmts_resolved = set()
+ dependencies = {}
+ assmt_to_names = {}
+
+ for name, entry in scope.entries.items():
+ for assmt in entry.cf_assignments:
+ names = assmt.type_dependencies()
+ assmt_to_names[assmt] = names
+ assmts = set()
+ for node in names:
+ assmts.update(node.cf_state)
+ dependencies[assmt] = assmts
+ if entry.type is unspecified_type:
+ assignments.update(entry.cf_assignments)
+ else:
+ assmts_resolved.update(entry.cf_assignments)
+
+ def infer_name_node_type(node):
+ types = [assmt.inferred_type for assmt in node.cf_state]
+ if not types:
+ node_type = py_object_type
+ else:
+ entry = node.entry
+ node_type = spanning_type(
+ types, entry.might_overflow, entry.pos, scope)
+ node.inferred_type = node_type
+
+ def infer_name_node_type_partial(node):
+ types = [assmt.inferred_type for assmt in node.cf_state
+ if assmt.inferred_type is not None]
+ if not types:
+ return
+ entry = node.entry
+ return spanning_type(types, entry.might_overflow, entry.pos, scope)
+
+ def inferred_types(entry):
+ has_none = False
+ has_pyobjects = False
+ types = []
+ for assmt in entry.cf_assignments:
+ if assmt.rhs.is_none:
+ has_none = True
+ else:
+ rhs_type = assmt.inferred_type
+ if rhs_type and rhs_type.is_pyobject:
+ has_pyobjects = True
+ types.append(rhs_type)
+ # Ignore None assignments as long as there are concrete Python type assignments,
+ # but include None if it is the only assigned Python object.
+ if has_none and not has_pyobjects:
+ types.append(py_object_type)
+ return types
+
+ def resolve_assignments(assignments):
+ resolved = set()
+ for assmt in assignments:
+ deps = dependencies[assmt]
+ # All assignments are resolved
+ if assmts_resolved.issuperset(deps):
+ for node in assmt_to_names[assmt]:
+ infer_name_node_type(node)
+ # Resolve assmt
+ inferred_type = assmt.infer_type()
+ assmts_resolved.add(assmt)
+ resolved.add(assmt)
+ assignments.difference_update(resolved)
+ return resolved
+
+ def partial_infer(assmt):
+ partial_types = []
+ for node in assmt_to_names[assmt]:
+ partial_type = infer_name_node_type_partial(node)
+ if partial_type is None:
+ return False
+ partial_types.append((node, partial_type))
+ for node, partial_type in partial_types:
+ node.inferred_type = partial_type
+ assmt.infer_type()
+ return True
+
+ partial_assmts = set()
+ def resolve_partial(assignments):
+ # try to handle circular references
+ partials = set()
+ for assmt in assignments:
+ if assmt in partial_assmts:
+ continue
+ if partial_infer(assmt):
+ partials.add(assmt)
+ assmts_resolved.add(assmt)
+ partial_assmts.update(partials)
+ return partials
+
+ # Infer assignments
+ while True:
+ if not resolve_assignments(assignments):
+ if not resolve_partial(assignments):
+ break
+ inferred = set()
+ # First pass
+ for entry in scope.entries.values():
+ if entry.type is not unspecified_type:
+ continue
+ entry_type = py_object_type
+ if assmts_resolved.issuperset(entry.cf_assignments):
+ types = inferred_types(entry)
+ if types and all(types):
+ entry_type = spanning_type(
+ types, entry.might_overflow, entry.pos, scope)
+ inferred.add(entry)
+ self.set_entry_type(entry, entry_type)
+
+ def reinfer():
+ dirty = False
+ for entry in inferred:
+ for assmt in entry.cf_assignments:
+ assmt.infer_type()
+ types = inferred_types(entry)
+ new_type = spanning_type(types, entry.might_overflow, entry.pos, scope)
+ if new_type != entry.type:
+ self.set_entry_type(entry, new_type)
+ dirty = True
+ return dirty
+
+ # type propagation
+ while reinfer():
+ pass
+
+ if verbose:
+ for entry in inferred:
+ message(entry.pos, "inferred '%s' to be of type '%s'" % (
+ entry.name, entry.type))
+
+
+def find_spanning_type(type1, type2):
+ if type1 is type2:
+ result_type = type1
+ elif type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type:
+ # type inference can break the coercion back to a Python bool
+ # if it returns an arbitrary int type here
+ return py_object_type
+ else:
+ result_type = PyrexTypes.spanning_type(type1, type2)
+ if result_type in (PyrexTypes.c_double_type, PyrexTypes.c_float_type,
+ Builtin.float_type):
+ # Python's float type is just a C double, so it's safe to
+ # use the C type instead
+ return PyrexTypes.c_double_type
+ return result_type
+
+def simply_type(result_type, pos):
+ if result_type.is_reference:
+ result_type = result_type.ref_base_type
+ if result_type.is_const:
+ result_type = result_type.const_base_type
+ if result_type.is_cpp_class:
+ result_type.check_nullary_constructor(pos)
+ if result_type.is_array:
+ result_type = PyrexTypes.c_ptr_type(result_type.base_type)
+ return result_type
+
+def aggressive_spanning_type(types, might_overflow, pos, scope):
+ return simply_type(reduce(find_spanning_type, types), pos)
+
+def safe_spanning_type(types, might_overflow, pos, scope):
+ result_type = simply_type(reduce(find_spanning_type, types), pos)
+ if result_type.is_pyobject:
+ # In theory, any specific Python type is always safe to
+ # infer. However, inferring str can cause some existing code
+ # to break, since we are also now much more strict about
+ # coercion from str to char *. See trac #553.
+ if result_type.name == 'str':
+ return py_object_type
+ else:
+ return result_type
+ elif result_type is PyrexTypes.c_double_type:
+ # Python's float type is just a C double, so it's safe to use
+ # the C type instead
+ return result_type
+ elif result_type is PyrexTypes.c_bint_type:
+ # find_spanning_type() only returns 'bint' for clean boolean
+ # operations without other int types, so this is safe, too
+ return result_type
+ elif result_type.is_pythran_expr:
+ return result_type
+ elif result_type.is_ptr:
+ # Any pointer except (signed|unsigned|) char* can't implicitly
+ # become a PyObject, and inferring char* is now accepted, too.
+ return result_type
+ elif result_type.is_cpp_class:
+ # These can't implicitly become Python objects either.
+ return result_type
+ elif result_type.is_struct:
+ # Though we have struct -> object for some structs, this is uncommonly
+ # used, won't arise in pure Python, and there shouldn't be side
+ # effects, so I'm declaring this safe.
+ return result_type
+ # TODO: double complex should be OK as well, but we need
+ # to make sure everything is supported.
+ elif (result_type.is_int or result_type.is_enum) and not might_overflow:
+ return result_type
+ elif (not result_type.can_coerce_to_pyobject(scope)
+ and not result_type.is_error):
+ return result_type
+ return py_object_type
+
+
+def get_type_inferer():
+ return SimpleAssignmentTypeInferer()
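A rough illustration of the spanning-type helpers above (interactive sketch, assuming a Cython checkout):

    from Cython.Compiler import PyrexTypes
    from Cython.Compiler.TypeInference import find_spanning_type, safe_spanning_type

    find_spanning_type(PyrexTypes.c_int_type, PyrexTypes.c_double_type)
    # -> c_double_type (Python's float is a C double, so the C type is used)

    safe_spanning_type([PyrexTypes.c_int_type], might_overflow=True, pos=None, scope=None)
    # -> py_object_type: in safe mode, an int that might overflow stays an object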
diff --git a/contrib/tools/cython/Cython/Compiler/TypeSlots.py b/contrib/tools/cython/Cython/Compiler/TypeSlots.py
new file mode 100644
index 0000000000..c6867447d2
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/TypeSlots.py
@@ -0,0 +1,941 @@
+#
+# Tables describing slots in the CPython type object
+# and associated know-how.
+#
+
+from __future__ import absolute_import
+
+from . import Naming
+from . import PyrexTypes
+from .Errors import error
+
+invisible = ['__cinit__', '__dealloc__', '__richcmp__',
+ '__nonzero__', '__bool__']
+
+richcmp_special_methods = ['__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__']
+
+
+class Signature(object):
+ # Method slot signature descriptor.
+ #
+ # has_dummy_arg boolean
+ # has_generic_args boolean
+ # fixed_arg_format string
+ # ret_format string
+ # error_value string
+ #
+ # The formats are strings made up of the following
+ # characters:
+ #
+ # 'O' Python object
+ # 'T' Python object of the type of 'self'
+ # 'v' void
+ # 'p' void *
+ # 'P' void **
+ # 'i' int
+ # 'b' bint
+ # 'I' int *
+ # 'l' long
+ # 'f' float
+ # 'd' double
+ # 'h' Py_hash_t
+ # 'z' Py_ssize_t
+ # 'Z' Py_ssize_t *
+ # 's' char *
+ # 'S' char **
+ # 'r' int used only to signal exception
+ # 'B' Py_buffer *
+ # '-' dummy 'self' argument (not used)
+ # '*' rest of args passed as generic Python
+ # arg tuple and kw dict (must be last
+ # char in format string)
+
+ format_map = {
+ 'O': PyrexTypes.py_object_type,
+ 'v': PyrexTypes.c_void_type,
+ 'p': PyrexTypes.c_void_ptr_type,
+ 'P': PyrexTypes.c_void_ptr_ptr_type,
+ 'i': PyrexTypes.c_int_type,
+ 'b': PyrexTypes.c_bint_type,
+ 'I': PyrexTypes.c_int_ptr_type,
+ 'l': PyrexTypes.c_long_type,
+ 'f': PyrexTypes.c_float_type,
+ 'd': PyrexTypes.c_double_type,
+ 'h': PyrexTypes.c_py_hash_t_type,
+ 'z': PyrexTypes.c_py_ssize_t_type,
+ 'Z': PyrexTypes.c_py_ssize_t_ptr_type,
+ 's': PyrexTypes.c_char_ptr_type,
+ 'S': PyrexTypes.c_char_ptr_ptr_type,
+ 'r': PyrexTypes.c_returncode_type,
+ 'B': PyrexTypes.c_py_buffer_ptr_type,
+ # 'T', '-' and '*' are handled otherwise
+ # and are not looked up in here
+ }
+
+ type_to_format_map = dict(
+ (type_, format_) for format_, type_ in format_map.items())
+
+ error_value_map = {
+ 'O': "NULL",
+ 'T': "NULL",
+ 'i': "-1",
+ 'b': "-1",
+ 'l': "-1",
+ 'r': "-1",
+ 'h': "-1",
+ 'z': "-1",
+ }
+
+ def __init__(self, arg_format, ret_format):
+ self.has_dummy_arg = 0
+ self.has_generic_args = 0
+ if arg_format[:1] == '-':
+ self.has_dummy_arg = 1
+ arg_format = arg_format[1:]
+ if arg_format[-1:] == '*':
+ self.has_generic_args = 1
+ arg_format = arg_format[:-1]
+ self.fixed_arg_format = arg_format
+ self.ret_format = ret_format
+ self.error_value = self.error_value_map.get(ret_format, None)
+ self.exception_check = ret_format != 'r' and self.error_value is not None
+ self.is_staticmethod = False
+
+ def __repr__(self):
+ return '<Signature[%s(%s%s)]>' % (
+ self.ret_format,
+ ', '.join(self.fixed_arg_format),
+ '*' if self.has_generic_args else '')
+
+ def num_fixed_args(self):
+ return len(self.fixed_arg_format)
+
+ def is_self_arg(self, i):
+ # argument is 'self' for methods or 'class' for classmethods
+ return self.fixed_arg_format[i] == 'T'
+
+ def returns_self_type(self):
+ # return type is same as 'self' argument type
+ return self.ret_format == 'T'
+
+ def fixed_arg_type(self, i):
+ return self.format_map[self.fixed_arg_format[i]]
+
+ def return_type(self):
+ return self.format_map[self.ret_format]
+
+ def format_from_type(self, arg_type):
+ if arg_type.is_pyobject:
+ arg_type = PyrexTypes.py_object_type
+ return self.type_to_format_map[arg_type]
+
+ def exception_value(self):
+ return self.error_value_map.get(self.ret_format)
+
+ def function_type(self, self_arg_override=None):
+ # Construct a C function type descriptor for this signature
+ args = []
+ for i in range(self.num_fixed_args()):
+ if self_arg_override is not None and self.is_self_arg(i):
+ assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
+ args.append(self_arg_override)
+ else:
+ arg_type = self.fixed_arg_type(i)
+ args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
+ if self_arg_override is not None and self.returns_self_type():
+ ret_type = self_arg_override.type
+ else:
+ ret_type = self.return_type()
+ exc_value = self.exception_value()
+ return PyrexTypes.CFuncType(
+ ret_type, args, exception_value=exc_value,
+ exception_check=self.exception_check)
+
+ def method_flags(self):
+ if self.ret_format == "O":
+ full_args = self.fixed_arg_format
+ if self.has_dummy_arg:
+ full_args = "O" + full_args
+ if full_args in ["O", "T"]:
+ if self.has_generic_args:
+ return [method_varargs, method_keywords]
+ else:
+ return [method_noargs]
+ elif full_args in ["OO", "TO"] and not self.has_generic_args:
+ return [method_onearg]
+
+ if self.is_staticmethod:
+ return [method_varargs, method_keywords]
+ return None
+
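A small sketch of how a Signature decodes, assuming a Cython checkout: "T" declares a single 'self'-typed fixed argument, and "O" an object return with NULL as its error sentinel:

    from Cython.Compiler.TypeSlots import Signature

    sig = Signature("T", "O")   # e.g. a unary slot such as __repr__
    sig.num_fixed_args()        # -> 1
    sig.is_self_arg(0)          # -> True
    sig.exception_value()       # -> 'NULL' (returning NULL signals an exception)
    sig.method_flags()          # -> [method_noargs], i.e. METH_NOARGS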
+
+class SlotDescriptor(object):
+ # Abstract base class for type slot descriptors.
+ #
+ # slot_name string Member name of the slot in the type object
+ # is_initialised_dynamically Is initialised by code in the module init function
+ # is_inherited Is inherited by subtypes (see PyType_Ready())
+ # py3 Indicates presence of slot in Python 3
+ # py2 Indicates presence of slot in Python 2
+ # ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.
+
+ def __init__(self, slot_name, dynamic=False, inherited=False,
+ py3=True, py2=True, ifdef=None):
+ self.slot_name = slot_name
+ self.is_initialised_dynamically = dynamic
+ self.is_inherited = inherited
+ self.ifdef = ifdef
+ self.py3 = py3
+ self.py2 = py2
+
+ def preprocessor_guard_code(self):
+ ifdef = self.ifdef
+ py2 = self.py2
+ py3 = self.py3
+ guard = None
+ if ifdef:
+ guard = ("#if %s" % ifdef)
+ elif not py3 or py3 == '<RESERVED>':
+ guard = ("#if PY_MAJOR_VERSION < 3")
+ elif not py2:
+ guard = ("#if PY_MAJOR_VERSION >= 3")
+ return guard
+
+ def generate(self, scope, code):
+ preprocessor_guard = self.preprocessor_guard_code()
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+
+ end_pypy_guard = False
+ if self.is_initialised_dynamically:
+ value = "0"
+ else:
+ value = self.slot_code(scope)
+ if value == "0" and self.is_inherited:
+ # PyPy currently has a broken PyType_Ready() that fails to
+ # inherit some slots. To work around this, we explicitly
+ # set inherited slots here, but only in PyPy since CPython
+ # handles this better than we do.
+ inherited_value = value
+ current_scope = scope
+ while (inherited_value == "0"
+ and current_scope.parent_type
+ and current_scope.parent_type.base_type
+ and current_scope.parent_type.base_type.scope):
+ current_scope = current_scope.parent_type.base_type.scope
+ inherited_value = self.slot_code(current_scope)
+ if inherited_value != "0":
+ code.putln("#if CYTHON_COMPILING_IN_PYPY")
+ code.putln("%s, /*%s*/" % (inherited_value, self.slot_name))
+ code.putln("#else")
+ end_pypy_guard = True
+
+ code.putln("%s, /*%s*/" % (value, self.slot_name))
+
+ if end_pypy_guard:
+ code.putln("#endif")
+
+ if self.py3 == '<RESERVED>':
+ code.putln("#else")
+ code.putln("0, /*reserved*/")
+ if preprocessor_guard:
+ code.putln("#endif")
+
+ # Some C implementations have trouble statically
+ # initialising a global with a pointer to an extern
+ # function, so we initialise some of the type slots
+ # in the module init function instead.
+
+ def generate_dynamic_init_code(self, scope, code):
+ if self.is_initialised_dynamically:
+ value = self.slot_code(scope)
+ if value != "0":
+ code.putln("%s.%s = %s;" % (
+ scope.parent_type.typeobj_cname,
+ self.slot_name,
+ value
+ )
+ )
+
+
+class FixedSlot(SlotDescriptor):
+ # Descriptor for a type slot with a fixed value.
+ #
+ # value string
+
+ def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
+ SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
+ self.value = value
+
+ def slot_code(self, scope):
+ return self.value
+
+
+class EmptySlot(FixedSlot):
+ # Descriptor for a type slot whose value is always 0.
+
+ def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
+ FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
+
+
+class MethodSlot(SlotDescriptor):
+ # Type slot descriptor for a user-definable method.
+ #
+ # signature Signature
+ # method_name string The __xxx__ name of the method
+ # alternatives [string] Alternative list of __xxx__ names for the method
+
+ def __init__(self, signature, slot_name, method_name, fallback=None,
+ py3=True, py2=True, ifdef=None, inherited=True):
+ SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2,
+ ifdef=ifdef, inherited=inherited)
+ self.signature = signature
+ self.slot_name = slot_name
+ self.method_name = method_name
+ self.alternatives = []
+ method_name_to_slot[method_name] = self
+ #
+ if fallback:
+ self.alternatives.append(fallback)
+ for alt in (self.py2, self.py3):
+ if isinstance(alt, (tuple, list)):
+ slot_name, method_name = alt
+ self.alternatives.append(method_name)
+ method_name_to_slot[method_name] = self
+
+ def slot_code(self, scope):
+ entry = scope.lookup_here(self.method_name)
+ if entry and entry.is_special and entry.func_cname:
+ return entry.func_cname
+ for method_name in self.alternatives:
+ entry = scope.lookup_here(method_name)
+ if entry and entry.is_special and entry.func_cname:
+ return entry.func_cname
+ return "0"
+
+
+class InternalMethodSlot(SlotDescriptor):
+ # Type slot descriptor for a method which is always
+ # synthesized by Cython.
+ #
+ # slot_name string Member name of the slot in the type object
+
+ def __init__(self, slot_name, **kargs):
+ SlotDescriptor.__init__(self, slot_name, **kargs)
+
+ def slot_code(self, scope):
+ return scope.mangle_internal(self.slot_name)
+
+
+class GCDependentSlot(InternalMethodSlot):
+ # Descriptor for a slot whose value depends on whether
+ # the type participates in GC.
+
+ def __init__(self, slot_name, **kargs):
+ InternalMethodSlot.__init__(self, slot_name, **kargs)
+
+ def slot_code(self, scope):
+ if not scope.needs_gc():
+ return "0"
+ if not scope.has_cyclic_pyobject_attrs:
+ # if the type does not have GC relevant object attributes, it can
+ # delegate GC methods to its parent - iff the parent functions
+ # are defined in the same module
+ parent_type_scope = scope.parent_type.base_type.scope
+ if scope.parent_scope is parent_type_scope.parent_scope:
+ entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
+ if entry.visibility != 'extern':
+ return self.slot_code(parent_type_scope)
+ return InternalMethodSlot.slot_code(self, scope)
+
+
+class GCClearReferencesSlot(GCDependentSlot):
+
+ def slot_code(self, scope):
+ if scope.needs_tp_clear():
+ return GCDependentSlot.slot_code(self, scope)
+ return "0"
+
+
+class ConstructorSlot(InternalMethodSlot):
+ # Descriptor for tp_new and tp_dealloc.
+
+ def __init__(self, slot_name, method, **kargs):
+ InternalMethodSlot.__init__(self, slot_name, **kargs)
+ self.method = method
+
+ def slot_code(self, scope):
+ entry = scope.lookup_here(self.method)
+ if (self.slot_name != 'tp_new'
+ and scope.parent_type.base_type
+ and not scope.has_pyobject_attrs
+ and not scope.has_memoryview_attrs
+ and not scope.has_cpp_class_attrs
+ and not (entry and entry.is_special)):
+ # if the type does not have object attributes, it can
+ # delegate GC methods to its parent - iff the parent
+ # functions are defined in the same module
+ parent_type_scope = scope.parent_type.base_type.scope
+ if scope.parent_scope is parent_type_scope.parent_scope:
+ entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
+ if entry.visibility != 'extern':
+ return self.slot_code(parent_type_scope)
+ if entry and not entry.is_special:
+ return "0"
+ return InternalMethodSlot.slot_code(self, scope)
+
+
+class SyntheticSlot(InternalMethodSlot):
+ # Type slot descriptor for a synthesized method which
+ # dispatches to one or more user-defined methods depending
+ # on its arguments. If none of the relevant methods are
+ # defined, the method will not be synthesized and an
+ # alternative default value will be placed in the type
+ # slot.
+
+ def __init__(self, slot_name, user_methods, default_value, **kargs):
+ InternalMethodSlot.__init__(self, slot_name, **kargs)
+ self.user_methods = user_methods
+ self.default_value = default_value
+
+ def slot_code(self, scope):
+ if scope.defines_any_special(self.user_methods):
+ return InternalMethodSlot.slot_code(self, scope)
+ else:
+ return self.default_value
+
+
+class RichcmpSlot(MethodSlot):
+ def slot_code(self, scope):
+ entry = scope.lookup_here(self.method_name)
+ if entry and entry.is_special and entry.func_cname:
+ return entry.func_cname
+ elif scope.defines_any_special(richcmp_special_methods):
+ return scope.mangle_internal(self.slot_name)
+ else:
+ return "0"
+
+
+class TypeFlagsSlot(SlotDescriptor):
+ # Descriptor for the type flags slot.
+
+ def slot_code(self, scope):
+ value = "Py_TPFLAGS_DEFAULT"
+ if scope.directives['type_version_tag']:
+ # it's not in 'Py_TPFLAGS_DEFAULT' in Py2
+ value += "|Py_TPFLAGS_HAVE_VERSION_TAG"
+ else:
+ # it's enabled in 'Py_TPFLAGS_DEFAULT' in Py3
+ value = "(%s&~Py_TPFLAGS_HAVE_VERSION_TAG)" % value
+ value += "|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
+ if not scope.parent_type.is_final_type:
+ value += "|Py_TPFLAGS_BASETYPE"
+ if scope.needs_gc():
+ value += "|Py_TPFLAGS_HAVE_GC"
+ return value
+
+
+class DocStringSlot(SlotDescriptor):
+ # Descriptor for the docstring slot.
+
+ def slot_code(self, scope):
+ doc = scope.doc
+ if doc is None:
+ return "0"
+ if doc.is_unicode:
+ doc = doc.as_utf8_string()
+ return doc.as_c_string_literal()
+
+
+class SuiteSlot(SlotDescriptor):
+ # Descriptor for a substructure of the type object.
+ #
+ # sub_slots [SlotDescriptor]
+
+ def __init__(self, sub_slots, slot_type, slot_name, ifdef=None):
+ SlotDescriptor.__init__(self, slot_name, ifdef=ifdef)
+ self.sub_slots = sub_slots
+ self.slot_type = slot_type
+ substructures.append(self)
+
+ def is_empty(self, scope):
+ for slot in self.sub_slots:
+ if slot.slot_code(scope) != "0":
+ return False
+ return True
+
+ def substructure_cname(self, scope):
+ return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
+
+ def slot_code(self, scope):
+ if not self.is_empty(scope):
+ return "&%s" % self.substructure_cname(scope)
+ return "0"
+
+ def generate_substructure(self, scope, code):
+ if not self.is_empty(scope):
+ code.putln("")
+ if self.ifdef:
+ code.putln("#if %s" % self.ifdef)
+ code.putln(
+ "static %s %s = {" % (
+ self.slot_type,
+ self.substructure_cname(scope)))
+ for slot in self.sub_slots:
+ slot.generate(scope, code)
+ code.putln("};")
+ if self.ifdef:
+ code.putln("#endif")
+
+substructures = [] # List of all SuiteSlot instances
+
+class MethodTableSlot(SlotDescriptor):
+ # Slot descriptor for the method table.
+
+ def slot_code(self, scope):
+ if scope.pyfunc_entries:
+ return scope.method_table_cname
+ else:
+ return "0"
+
+
+class MemberTableSlot(SlotDescriptor):
+ # Slot descriptor for the table of Python-accessible attributes.
+
+ def slot_code(self, scope):
+ return "0"
+
+
+class GetSetSlot(SlotDescriptor):
+ # Slot descriptor for the table of attribute get & set methods.
+
+ def slot_code(self, scope):
+ if scope.property_entries:
+ return scope.getset_table_cname
+ else:
+ return "0"
+
+
+class BaseClassSlot(SlotDescriptor):
+ # Slot descriptor for the base class slot.
+
+ def __init__(self, name):
+ SlotDescriptor.__init__(self, name, dynamic = 1)
+
+ def generate_dynamic_init_code(self, scope, code):
+ base_type = scope.parent_type.base_type
+ if base_type:
+ code.putln("%s.%s = %s;" % (
+ scope.parent_type.typeobj_cname,
+ self.slot_name,
+ base_type.typeptr_cname))
+
+
+class DictOffsetSlot(SlotDescriptor):
+ # Slot descriptor for a class' dict offset, for dynamic attributes.
+
+ def slot_code(self, scope):
+ dict_entry = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
+ if dict_entry and dict_entry.is_variable:
+ if getattr(dict_entry.type, 'cname', None) != 'PyDict_Type':
+ error(dict_entry.pos, "__dict__ slot must be of type 'dict'")
+ return "0"
+ type = scope.parent_type
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ return ("offsetof(%s, %s)" % (
+ objstruct,
+ dict_entry.cname))
+ else:
+ return "0"
+
+
+# The following dictionary maps __xxx__ method names to slot descriptors.
+
+method_name_to_slot = {}
+
+## The following slots are (or could be) initialised with an
+## extern function pointer.
+#
+#slots_initialised_from_extern = (
+# "tp_free",
+#)
+
+#------------------------------------------------------------------------------------------
+#
+# Utility functions for accessing slot table data structures
+#
+#------------------------------------------------------------------------------------------
+
+def get_special_method_signature(name):
+ # Given a method name, if it is a special method,
+ # return its signature, else return None.
+ slot = method_name_to_slot.get(name)
+ if slot:
+ return slot.signature
+ elif name in richcmp_special_methods:
+ return ibinaryfunc
+ else:
+ return None
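+
+# For example, once the slot tables below have registered their methods:
+#
+#   get_special_method_signature("__len__")      # -> lenfunc, Signature("T", "z")
+#   get_special_method_signature("not_special")  # -> None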
+
+
+def get_property_accessor_signature(name):
+ # Return signature of accessor for an extension type
+ # property, else None.
+ return property_accessor_signatures.get(name)
+
+
+def get_base_slot_function(scope, slot):
+    # Returns the function implementing this slot in the base class,
+    # provided it is defined in the same module (and not extern).
+    # This lets the compiler optimize calls that recursively climb
+    # the class hierarchy.
+ base_type = scope.parent_type.base_type
+ if scope.parent_scope is base_type.scope.parent_scope:
+ parent_slot = slot.slot_code(base_type.scope)
+ if parent_slot != '0':
+ entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
+ if entry.visibility != 'extern':
+ return parent_slot
+ return None
+
+
+def get_slot_function(scope, slot):
+    # Returns the function implementing this slot for the given scope's
+    # own type, if it is defined in this module, else None. (Unlike
+    # get_base_slot_function above, this does not look at the base class.)
+ slot_code = slot.slot_code(scope)
+ if slot_code != '0':
+ entry = scope.parent_scope.lookup_here(scope.parent_type.name)
+ if entry.visibility != 'extern':
+ return slot_code
+ return None
+
+
+def get_slot_by_name(slot_name):
+ # For now, only search the type struct, no referenced sub-structs.
+ for slot in slot_table:
+ if slot.slot_name == slot_name:
+ return slot
+ assert False, "Slot not found: %s" % slot_name
+
+
+def get_slot_code_by_name(scope, slot_name):
+ slot = get_slot_by_name(slot_name)
+ return slot.slot_code(scope)
+
+def is_reverse_number_slot(name):
+ """
+ Tries to identify __radd__ and friends (so the METH_COEXIST flag can be applied).
+
+ There's no great consequence if it inadvertently identifies a few other methods
+ so just use a simple rule rather than an exact list.
+ """
+ if name.startswith("__r") and name.endswith("__"):
+ forward_name = name.replace("r", "", 1)
+ for meth in PyNumberMethods:
+ if getattr(meth, "method_name", None) == forward_name:
+ return True
+ return False
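+
+# e.g. is_reverse_number_slot("__radd__") returns True ("__add__" is a number
+# slot method below), while is_reverse_number_slot("__repr__") returns False
+# (the derived name "__epr__" matches nothing).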
+
+
+#------------------------------------------------------------------------------------------
+#
+# Signatures for generic Python functions and methods.
+#
+#------------------------------------------------------------------------------------------
+
+pyfunction_signature = Signature("-*", "O")
+pymethod_signature = Signature("T*", "O")
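+
+# Reading the format strings, per the Signature conventions defined earlier
+# in this file (restated here as an assumption): 'T' is the 'self' argument
+# of the object's own type, 'O' a generic object, '-' an unused dummy 'self',
+# and '*' the remaining arguments as a Python argument tuple plus keyword
+# dict. So "T*" with return code "O" describes roughly:
+#
+#   PyObject *(*)(PyObject *self, PyObject *args, PyObject *kwargs)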
+
+#------------------------------------------------------------------------------------------
+#
+# Signatures for simple Python functions.
+#
+#------------------------------------------------------------------------------------------
+
+pyfunction_noargs = Signature("-", "O")
+pyfunction_onearg = Signature("-O", "O")
+
+#------------------------------------------------------------------------------------------
+#
+# Signatures for the various kinds of function that
+# can appear in the type object and its substructures.
+#
+#------------------------------------------------------------------------------------------
+
+unaryfunc = Signature("T", "O") # typedef PyObject * (*unaryfunc)(PyObject *);
+binaryfunc = Signature("OO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
+ibinaryfunc = Signature("TO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
+ternaryfunc = Signature("OOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
+iternaryfunc = Signature("TOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
+callfunc = Signature("T*", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
+inquiry = Signature("T", "i") # typedef int (*inquiry)(PyObject *);
+lenfunc = Signature("T", "z") # typedef Py_ssize_t (*lenfunc)(PyObject *);
+
+ # typedef int (*coercion)(PyObject **, PyObject **);
+intargfunc = Signature("Ti", "O") # typedef PyObject *(*intargfunc)(PyObject *, int);
+ssizeargfunc = Signature("Tz", "O") # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
+intintargfunc = Signature("Tii", "O") # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
+ssizessizeargfunc = Signature("Tzz", "O") # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
+intobjargproc = Signature("TiO", 'r') # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
+ssizeobjargproc = Signature("TzO", 'r') # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
+intintobjargproc = Signature("TiiO", 'r') # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
+ssizessizeobjargproc = Signature("TzzO", 'r') # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
+
+intintargproc = Signature("Tii", 'r')
+ssizessizeargproc = Signature("Tzz", 'r')
+objargfunc = Signature("TO", "O")
+objobjargproc = Signature("TOO", 'r') # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
+readbufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
+writebufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
+segcountproc = Signature("TZ", "z") # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
+charbufferproc = Signature("TzS", "z") # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
+objargproc = Signature("TO", 'r') # typedef int (*objobjproc)(PyObject *, PyObject *);
+ # typedef int (*visitproc)(PyObject *, void *);
+ # typedef int (*traverseproc)(PyObject *, visitproc, void *);
+
+destructor = Signature("T", "v") # typedef void (*destructor)(PyObject *);
+# printfunc = Signature("TFi", 'r') # typedef int (*printfunc)(PyObject *, FILE *, int);
+ # typedef PyObject *(*getattrfunc)(PyObject *, char *);
+getattrofunc = Signature("TO", "O") # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
+ # typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
+setattrofunc = Signature("TOO", 'r') # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
+delattrofunc = Signature("TO", 'r')
+cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
+reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
+hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
+richcmpfunc = Signature("TOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
+getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
+iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
+descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
+descrsetfunc = Signature("TOO", 'r') # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
+descrdelfunc = Signature("TO", 'r')
+initproc = Signature("T*", 'r') # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
+ # typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
+ # typedef PyObject *(*allocfunc)(struct _typeobject *, int);
+
+getbufferproc = Signature("TBi", "r") # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
+releasebufferproc = Signature("TB", "v") # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
+
+
+#------------------------------------------------------------------------------------------
+#
+# Signatures for accessor methods of properties.
+#
+#------------------------------------------------------------------------------------------
+
+property_accessor_signatures = {
+ '__get__': Signature("T", "O"),
+ '__set__': Signature("TO", 'r'),
+ '__del__': Signature("T", 'r')
+}
+
+#------------------------------------------------------------------------------------------
+#
+# Descriptor tables for the slots of the various type object
+# substructures, in the order they appear in the structure.
+#
+#------------------------------------------------------------------------------------------
+
+PyNumberMethods_Py3_GUARD = "PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)"
+
+PyNumberMethods = (
+ MethodSlot(binaryfunc, "nb_add", "__add__"),
+ MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
+ MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
+ MethodSlot(binaryfunc, "nb_divide", "__div__", ifdef = PyNumberMethods_Py3_GUARD),
+ MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
+ MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
+ MethodSlot(ternaryfunc, "nb_power", "__pow__"),
+ MethodSlot(unaryfunc, "nb_negative", "__neg__"),
+ MethodSlot(unaryfunc, "nb_positive", "__pos__"),
+ MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
+ MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
+ MethodSlot(unaryfunc, "nb_invert", "__invert__"),
+ MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
+ MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
+ MethodSlot(binaryfunc, "nb_and", "__and__"),
+ MethodSlot(binaryfunc, "nb_xor", "__xor__"),
+ MethodSlot(binaryfunc, "nb_or", "__or__"),
+ EmptySlot("nb_coerce", ifdef = PyNumberMethods_Py3_GUARD),
+ MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
+ MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
+ MethodSlot(unaryfunc, "nb_float", "__float__"),
+ MethodSlot(unaryfunc, "nb_oct", "__oct__", ifdef = PyNumberMethods_Py3_GUARD),
+ MethodSlot(unaryfunc, "nb_hex", "__hex__", ifdef = PyNumberMethods_Py3_GUARD),
+
+ # Added in release 2.0
+ MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", ifdef = PyNumberMethods_Py3_GUARD),
+ MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
+ MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
+
+ # Added in release 2.2
+ # The following require the Py_TPFLAGS_HAVE_CLASS flag
+ MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
+ MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
+ MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
+
+ # Added in release 2.5
+ MethodSlot(unaryfunc, "nb_index", "__index__"),
+
+ # Added in release 3.5
+ MethodSlot(binaryfunc, "nb_matrix_multiply", "__matmul__", ifdef="PY_VERSION_HEX >= 0x03050000"),
+ MethodSlot(ibinaryfunc, "nb_inplace_matrix_multiply", "__imatmul__", ifdef="PY_VERSION_HEX >= 0x03050000"),
+)
+
+PySequenceMethods = (
+ MethodSlot(lenfunc, "sq_length", "__len__"),
+ EmptySlot("sq_concat"), # nb_add used instead
+ EmptySlot("sq_repeat"), # nb_multiply used instead
+ SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
+ MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
+ EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
+ SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
+ MethodSlot(cmpfunc, "sq_contains", "__contains__"),
+ EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
+ EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
+)
+
+PyMappingMethods = (
+ MethodSlot(lenfunc, "mp_length", "__len__"),
+ MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
+ SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
+)
+
+PyBufferProcs = (
+ MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
+ MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
+ MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
+ MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),
+
+ MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__"),
+ MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__")
+)
+
+PyAsyncMethods = (
+ MethodSlot(unaryfunc, "am_await", "__await__"),
+ MethodSlot(unaryfunc, "am_aiter", "__aiter__"),
+ MethodSlot(unaryfunc, "am_anext", "__anext__"),
+ EmptySlot("am_send", ifdef="PY_VERSION_HEX >= 0x030A00A3"),
+)
+
+#------------------------------------------------------------------------------------------
+#
+# The main slot table. This table contains descriptors for all the
+# top-level type slots, beginning with tp_dealloc, in the order they
+# appear in the type object.
+#
+#------------------------------------------------------------------------------------------
+
+slot_table = (
+ ConstructorSlot("tp_dealloc", '__dealloc__'),
+ EmptySlot("tp_print", ifdef="PY_VERSION_HEX < 0x030800b4"),
+ EmptySlot("tp_vectorcall_offset", ifdef="PY_VERSION_HEX >= 0x030800b4"),
+ EmptySlot("tp_getattr"),
+ EmptySlot("tp_setattr"),
+
+    # tp_compare (Py2) / tp_reserved (Py3 < 3.5) / tp_as_async (Py3.5+);
+    # in Py3, this slot position is only ever used as tp_as_async.
+ MethodSlot(cmpfunc, "tp_compare", "__cmp__", ifdef="PY_MAJOR_VERSION < 3"),
+ SuiteSlot(PyAsyncMethods, "__Pyx_PyAsyncMethodsStruct", "tp_as_async", ifdef="PY_MAJOR_VERSION >= 3"),
+
+ MethodSlot(reprfunc, "tp_repr", "__repr__"),
+
+ SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
+ SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
+ SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
+
+ MethodSlot(hashfunc, "tp_hash", "__hash__", inherited=False), # Py3 checks for __richcmp__
+ MethodSlot(callfunc, "tp_call", "__call__"),
+ MethodSlot(reprfunc, "tp_str", "__str__"),
+
+ SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
+ SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
+
+ SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
+
+ TypeFlagsSlot("tp_flags"),
+ DocStringSlot("tp_doc"),
+
+ GCDependentSlot("tp_traverse"),
+ GCClearReferencesSlot("tp_clear"),
+
+ RichcmpSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False), # Py3 checks for __hash__
+
+ EmptySlot("tp_weaklistoffset"),
+
+ MethodSlot(getiterfunc, "tp_iter", "__iter__"),
+ MethodSlot(iternextfunc, "tp_iternext", "__next__"),
+
+ MethodTableSlot("tp_methods"),
+ MemberTableSlot("tp_members"),
+ GetSetSlot("tp_getset"),
+
+ BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
+ EmptySlot("tp_dict"),
+
+ SyntheticSlot("tp_descr_get", ["__get__"], "0"),
+ SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
+
+ DictOffsetSlot("tp_dictoffset"),
+
+ MethodSlot(initproc, "tp_init", "__init__"),
+ EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
+ InternalMethodSlot("tp_new"),
+ EmptySlot("tp_free"),
+
+ EmptySlot("tp_is_gc"),
+ EmptySlot("tp_bases"),
+ EmptySlot("tp_mro"),
+ EmptySlot("tp_cache"),
+ EmptySlot("tp_subclasses"),
+ EmptySlot("tp_weaklist"),
+ EmptySlot("tp_del"),
+ EmptySlot("tp_version_tag"),
+ EmptySlot("tp_finalize", ifdef="PY_VERSION_HEX >= 0x030400a1"),
+ EmptySlot("tp_vectorcall", ifdef="PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)"),
+ EmptySlot("tp_print", ifdef="PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000"),
+ # PyPy specific extension - only here to avoid C compiler warnings.
+ EmptySlot("tp_pypy_flags", ifdef="CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000"),
+)
+
+#------------------------------------------------------------------------------------------
+#
+# Descriptors for special methods which don't appear directly
+# in the type object or its substructures. These methods are
+# called from slot functions synthesized by Cython.
+#
+#------------------------------------------------------------------------------------------
+
+MethodSlot(initproc, "", "__cinit__")
+MethodSlot(destructor, "", "__dealloc__")
+MethodSlot(objobjargproc, "", "__setitem__")
+MethodSlot(objargproc, "", "__delitem__")
+MethodSlot(ssizessizeobjargproc, "", "__setslice__")
+MethodSlot(ssizessizeargproc, "", "__delslice__")
+MethodSlot(getattrofunc, "", "__getattr__")
+MethodSlot(getattrofunc, "", "__getattribute__")
+MethodSlot(setattrofunc, "", "__setattr__")
+MethodSlot(delattrofunc, "", "__delattr__")
+MethodSlot(descrgetfunc, "", "__get__")
+MethodSlot(descrsetfunc, "", "__set__")
+MethodSlot(descrdelfunc, "", "__delete__")
+
+
+# Method flags for python-exposed methods.
+
+method_noargs = "METH_NOARGS"
+method_onearg = "METH_O"
+method_varargs = "METH_VARARGS"
+method_keywords = "METH_KEYWORDS"
+method_coexist = "METH_COEXIST"
diff --git a/contrib/tools/cython/Cython/Compiler/UtilNodes.py b/contrib/tools/cython/Cython/Compiler/UtilNodes.py
new file mode 100644
index 0000000000..c41748ace0
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/UtilNodes.py
@@ -0,0 +1,359 @@
+#
+# Nodes used as utilities and support for transforms etc.
+# These often make up sets including both Nodes and ExprNodes
+# so it is convenient to have them in a separate module.
+#
+
+from __future__ import absolute_import
+
+from . import Nodes
+from . import ExprNodes
+from .Nodes import Node
+from .ExprNodes import AtomicExprNode
+from .PyrexTypes import c_ptr_type
+
+
+class TempHandle(object):
+ # THIS IS DEPRECATED, USE LetRefNode instead
+ temp = None
+ needs_xdecref = False
+ def __init__(self, type, needs_cleanup=None):
+ self.type = type
+ if needs_cleanup is None:
+ self.needs_cleanup = type.is_pyobject
+ else:
+ self.needs_cleanup = needs_cleanup
+
+ def ref(self, pos):
+ return TempRefNode(pos, handle=self, type=self.type)
+
+
+class TempRefNode(AtomicExprNode):
+ # THIS IS DEPRECATED, USE LetRefNode instead
+ # handle TempHandle
+
+ def analyse_types(self, env):
+ assert self.type == self.handle.type
+ return self
+
+ def analyse_target_types(self, env):
+ assert self.type == self.handle.type
+ return self
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def calculate_result_code(self):
+ result = self.handle.temp
+ if result is None: result = "<error>" # might be called and overwritten
+ return result
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ if self.type.is_pyobject:
+ rhs.make_owned_reference(code)
+ # TODO: analyse control flow to see if this is necessary
+ code.put_xdecref(self.result(), self.ctype())
+ code.putln('%s = %s;' % (
+ self.result(),
+ rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()),
+ ))
+ rhs.generate_post_assignment_code(code)
+ rhs.free_temps(code)
+
+
+class TempsBlockNode(Node):
+ # THIS IS DEPRECATED, USE LetNode instead
+
+ """
+ Creates a block which allocates temporary variables.
+ This is used by transforms to output constructs that need
+ to make use of a temporary variable. Simply pass the types
+ of the needed temporaries to the constructor.
+
+    The variables can be referred to using a TempRefNode
+    (constructed by calling ref() on the corresponding TempHandle).
+ """
+
+ # temps [TempHandle]
+ # body StatNode
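+    #
+    # Deprecated usage sketch (names, types and 'pos' are illustrative):
+    #
+    #   handle = TempHandle(some_type)
+    #   block = TempsBlockNode(pos, temps=[handle], body=body_stat)
+    #   ref = handle.ref(pos)   # TempRefNode, usable inside 'body_stat'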
+
+ child_attrs = ["body"]
+
+ def generate_execution_code(self, code):
+ for handle in self.temps:
+ handle.temp = code.funcstate.allocate_temp(
+ handle.type, manage_ref=handle.needs_cleanup)
+ self.body.generate_execution_code(code)
+ for handle in self.temps:
+ if handle.needs_cleanup:
+ if handle.needs_xdecref:
+ code.put_xdecref_clear(handle.temp, handle.type)
+ else:
+ code.put_decref_clear(handle.temp, handle.type)
+ code.funcstate.release_temp(handle.temp)
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.body.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.body.annotate(code)
+
+
+class ResultRefNode(AtomicExprNode):
+ # A reference to the result of an expression. The result_code
+ # must be set externally (usually a temp name).
+
+ subexprs = []
+ lhs_of_first_assignment = False
+
+ def __init__(self, expression=None, pos=None, type=None, may_hold_none=True, is_temp=False):
+ self.expression = expression
+ self.pos = None
+ self.may_hold_none = may_hold_none
+ if expression is not None:
+ self.pos = expression.pos
+ if hasattr(expression, "type"):
+ self.type = expression.type
+ if pos is not None:
+ self.pos = pos
+ if type is not None:
+ self.type = type
+ if is_temp:
+ self.is_temp = True
+ assert self.pos is not None
+
+ def clone_node(self):
+ # nothing to do here
+ return self
+
+ def type_dependencies(self, env):
+ if self.expression:
+ return self.expression.type_dependencies(env)
+ else:
+ return ()
+
+ def update_expression(self, expression):
+ self.expression = expression
+ if hasattr(expression, "type"):
+ self.type = expression.type
+
+ def analyse_types(self, env):
+ if self.expression is not None:
+ if not self.expression.type:
+ self.expression = self.expression.analyse_types(env)
+ self.type = self.expression.type
+ return self
+
+ def infer_type(self, env):
+ if self.type is not None:
+ return self.type
+ if self.expression is not None:
+ if self.expression.type is not None:
+ return self.expression.type
+ return self.expression.infer_type(env)
+ assert False, "cannot infer type of ResultRefNode"
+
+ def may_be_none(self):
+ if not self.type.is_pyobject:
+ return False
+ return self.may_hold_none
+
+ def _DISABLED_may_be_none(self):
+ # not sure if this is safe - the expression may not be the
+ # only value that gets assigned
+ if self.expression is not None:
+ return self.expression.may_be_none()
+ if self.type is not None:
+ return self.type.is_pyobject
+ return True # play safe
+
+ def is_simple(self):
+ return True
+
+ def result(self):
+ try:
+ return self.result_code
+ except AttributeError:
+ if self.expression is not None:
+ self.result_code = self.expression.result()
+ return self.result_code
+
+ def generate_evaluation_code(self, code):
+ pass
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_disposal_code(self, code):
+ pass
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ if self.type.is_pyobject:
+ rhs.make_owned_reference(code)
+ if not self.lhs_of_first_assignment:
+ code.put_decref(self.result(), self.ctype())
+ code.putln('%s = %s;' % (
+ self.result(),
+ rhs.result() if overloaded_assignment else rhs.result_as(self.ctype()),
+ ))
+ rhs.generate_post_assignment_code(code)
+ rhs.free_temps(code)
+
+ def allocate_temps(self, env):
+ pass
+
+ def release_temp(self, env):
+ pass
+
+ def free_temps(self, code):
+ pass
+
+
+class LetNodeMixin:
+ def set_temp_expr(self, lazy_temp):
+ self.lazy_temp = lazy_temp
+ self.temp_expression = lazy_temp.expression
+
+ def setup_temp_expr(self, code):
+ self.temp_expression.generate_evaluation_code(code)
+ self.temp_type = self.temp_expression.type
+ if self.temp_type.is_array:
+ self.temp_type = c_ptr_type(self.temp_type.base_type)
+ self._result_in_temp = self.temp_expression.result_in_temp()
+ if self._result_in_temp:
+ self.temp = self.temp_expression.result()
+ else:
+ self.temp_expression.make_owned_reference(code)
+ self.temp = code.funcstate.allocate_temp(
+ self.temp_type, manage_ref=True)
+ code.putln("%s = %s;" % (self.temp, self.temp_expression.result()))
+ self.temp_expression.generate_disposal_code(code)
+ self.temp_expression.free_temps(code)
+ self.lazy_temp.result_code = self.temp
+
+ def teardown_temp_expr(self, code):
+ if self._result_in_temp:
+ self.temp_expression.generate_disposal_code(code)
+ self.temp_expression.free_temps(code)
+ else:
+ if self.temp_type.is_pyobject:
+ code.put_decref_clear(self.temp, self.temp_type)
+ code.funcstate.release_temp(self.temp)
+
+
+class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
+ # A wrapper around a subexpression that moves an expression into a
+ # temp variable and provides it to the subexpression.
+
+ subexprs = ['temp_expression', 'subexpression']
+
+ def __init__(self, lazy_temp, subexpression):
+ self.set_temp_expr(lazy_temp)
+ self.pos = subexpression.pos
+ self.subexpression = subexpression
+ # if called after type analysis, we already know the type here
+ self.type = self.subexpression.type
+
+ def infer_type(self, env):
+ return self.subexpression.infer_type(env)
+
+ def may_be_none(self):
+ return self.subexpression.may_be_none()
+
+ def result(self):
+ return self.subexpression.result()
+
+ def analyse_types(self, env):
+ self.temp_expression = self.temp_expression.analyse_types(env)
+ self.lazy_temp.update_expression(self.temp_expression) # overwrite in case it changed
+ self.subexpression = self.subexpression.analyse_types(env)
+ self.type = self.subexpression.type
+ return self
+
+ def free_subexpr_temps(self, code):
+ self.subexpression.free_temps(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ self.subexpression.generate_disposal_code(code)
+
+ def generate_evaluation_code(self, code):
+ self.setup_temp_expr(code)
+ self.subexpression.generate_evaluation_code(code)
+ self.teardown_temp_expr(code)
+
+
+LetRefNode = ResultRefNode
+
+
+class LetNode(Nodes.StatNode, LetNodeMixin):
+ # Implements a local temporary variable scope. Imagine this
+ # syntax being present:
+ # let temp = VALUE:
+ # BLOCK (can modify temp)
+ # if temp is an object, decref
+ #
+ # Usually used after analysis phase, but forwards analysis methods
+ # to its children
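+    #
+    # Hypothetical construction from a transform ('value_expr' and
+    # 'body_stat_using' are illustrative):
+    #   ref = LetRefNode(value_expr)
+    #   node = LetNode(ref, body_stat_using(ref))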
+
+ child_attrs = ['temp_expression', 'body']
+
+ def __init__(self, lazy_temp, body):
+ self.set_temp_expr(lazy_temp)
+ self.pos = body.pos
+ self.body = body
+
+ def analyse_declarations(self, env):
+ self.temp_expression.analyse_declarations(env)
+ self.body.analyse_declarations(env)
+
+ def analyse_expressions(self, env):
+ self.temp_expression = self.temp_expression.analyse_expressions(env)
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_execution_code(self, code):
+ self.setup_temp_expr(code)
+ self.body.generate_execution_code(code)
+ self.teardown_temp_expr(code)
+
+ def generate_function_definitions(self, env, code):
+ self.temp_expression.generate_function_definitions(env, code)
+ self.body.generate_function_definitions(env, code)
+
+
+class TempResultFromStatNode(ExprNodes.ExprNode):
+ # An ExprNode wrapper around a StatNode that executes the StatNode
+ # body. Requires a ResultRefNode that it sets up to refer to its
+ # own temp result. The StatNode must assign a value to the result
+ # node, which then becomes the result of this node.
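+    #
+    # Hypothetical usage sketch ('stat' must assign to 'result_ref'):
+    #   result_ref = ResultRefNode(pos=stat.pos, type=some_type)
+    #   expr = TempResultFromStatNode(result_ref, stat)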
+
+ subexprs = []
+ child_attrs = ['body']
+
+ def __init__(self, result_ref, body):
+ self.result_ref = result_ref
+ self.pos = body.pos
+ self.body = body
+ self.type = result_ref.type
+ self.is_temp = 1
+
+ def analyse_declarations(self, env):
+ self.body.analyse_declarations(env)
+
+ def analyse_types(self, env):
+ self.body = self.body.analyse_expressions(env)
+ return self
+
+ def generate_result_code(self, code):
+ self.result_ref.result_code = self.result()
+ self.body.generate_execution_code(code)
diff --git a/contrib/tools/cython/Cython/Compiler/UtilityCode.py b/contrib/tools/cython/Cython/Compiler/UtilityCode.py
new file mode 100644
index 0000000000..98e9ab5bfb
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/UtilityCode.py
@@ -0,0 +1,237 @@
+from __future__ import absolute_import
+
+from .TreeFragment import parse_from_strings, StringParseContext
+from . import Symtab
+from . import Naming
+from . import Code
+
+
+class NonManglingModuleScope(Symtab.ModuleScope):
+
+ def __init__(self, prefix, *args, **kw):
+ self.prefix = prefix
+ self.cython_scope = None
+ self.cpp = kw.pop('cpp', False)
+ Symtab.ModuleScope.__init__(self, *args, **kw)
+
+ def add_imported_entry(self, name, entry, pos):
+ entry.used = True
+ return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos)
+
+ def mangle(self, prefix, name=None):
+ if name:
+ if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix):
+                # Functions, classes etc. get the manually defined prefix
+                # instead (the one passed to CythonUtilityCode), so that
+                # they remain easily callable under a predictable name.
+ prefix = self.prefix
+ return "%s%s" % (prefix, name)
+ else:
+ return Symtab.ModuleScope.mangle(self, prefix)
+
+
+class CythonUtilityCodeContext(StringParseContext):
+ scope = None
+
+ def find_module(self, module_name, relative_to=None, pos=None, need_pxd=True, absolute_fallback=True):
+ if relative_to:
+ raise AssertionError("Relative imports not supported in utility code.")
+ if module_name != self.module_name:
+ if module_name not in self.modules:
+ raise AssertionError("Only the cython cimport is supported.")
+ else:
+ return self.modules[module_name]
+
+ if self.scope is None:
+ self.scope = NonManglingModuleScope(
+ self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp)
+
+ return self.scope
+
+
+class CythonUtilityCode(Code.UtilityCodeBase):
+ """
+ Utility code written in the Cython language itself.
+
+    The @cname decorator can set the cname for a function or a method of a cdef class.
+ Functions decorated with @cname('c_func_name') get the given cname.
+
+ For cdef classes the rules are as follows:
+ obj struct -> <cname>_obj
+ obj type ptr -> <cname>_type
+ methods -> <class_cname>_<method_cname>
+
+ For methods the cname decorator is optional, but without the decorator the
+ methods will not be prototyped. See Cython.Compiler.CythonScope and
+ tests/run/cythonscope.pyx for examples.
+ """
+
+ is_cython_utility = True
+
+ def __init__(self, impl, name="__pyxutil", prefix="", requires=None,
+ file=None, from_scope=None, context=None, compiler_directives=None,
+ outer_module_scope=None):
+ # 1) We need to delay the parsing/processing, so that all modules can be
+ # imported without import loops
+        # 2) The same utility code object can be used for multiple source files,
+        #    but the generated node trees may be altered during the compilation
+        #    of a single file.
+ # Hence, delay any processing until later.
+ context_types = {}
+ if context is not None:
+ from .PyrexTypes import BaseType
+ for key, value in context.items():
+ if isinstance(value, BaseType):
+ context[key] = key
+ context_types[key] = value
+ impl = Code.sub_tempita(impl, context, file, name)
+ self.impl = impl
+ self.name = name
+ self.file = file
+ self.prefix = prefix
+ self.requires = requires or []
+ self.from_scope = from_scope
+ self.outer_module_scope = outer_module_scope
+ self.compiler_directives = compiler_directives
+ self.context_types = context_types
+
+ def __eq__(self, other):
+ if isinstance(other, CythonUtilityCode):
+ return self._equality_params() == other._equality_params()
+ else:
+ return False
+
+ def _equality_params(self):
+ outer_scope = self.outer_module_scope
+ while isinstance(outer_scope, NonManglingModuleScope):
+ outer_scope = outer_scope.outer_scope
+ return self.impl, outer_scope, self.compiler_directives
+
+ def __hash__(self):
+ return hash(self.impl)
+
+ def get_tree(self, entries_only=False, cython_scope=None):
+ from .AnalysedTreeTransforms import AutoTestDictTransform
+ # The AutoTestDictTransform creates the statement "__test__ = {}",
+ # which when copied into the main ModuleNode overwrites
+ # any __test__ in user code; not desired
+ excludes = [AutoTestDictTransform]
+
+ from . import Pipeline, ParseTreeTransforms
+ context = CythonUtilityCodeContext(
+ self.name, compiler_directives=self.compiler_directives,
+ cpp=cython_scope.is_cpp() if cython_scope else False)
+ context.prefix = self.prefix
+ context.cython_scope = cython_scope
+ #context = StringParseContext(self.name)
+ tree = parse_from_strings(
+ self.name, self.impl, context=context, allow_struct_enum_decorator=True)
+ pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes)
+
+ if entries_only:
+ p = []
+ for t in pipeline:
+ p.append(t)
+                # Stop the pipeline right after declaration analysis
+                # (test 't', not the list 'p', which could never match).
+                if isinstance(t, ParseTreeTransforms.AnalyseDeclarationsTransform):
+ break
+
+ pipeline = p
+
+ transform = ParseTreeTransforms.CnameDirectivesTransform(context)
+ # InterpretCompilerDirectives already does a cdef declarator check
+ #before = ParseTreeTransforms.DecoratorTransform
+ before = ParseTreeTransforms.InterpretCompilerDirectives
+ pipeline = Pipeline.insert_into_pipeline(pipeline, transform,
+ before=before)
+
+ def merge_scope(scope):
+ def merge_scope_transform(module_node):
+ module_node.scope.merge_in(scope)
+ return module_node
+ return merge_scope_transform
+
+ if self.from_scope:
+ pipeline = Pipeline.insert_into_pipeline(
+ pipeline, merge_scope(self.from_scope),
+ before=ParseTreeTransforms.AnalyseDeclarationsTransform)
+
+ for dep in self.requires:
+ if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope:
+ pipeline = Pipeline.insert_into_pipeline(
+ pipeline, merge_scope(dep.tree.scope),
+ before=ParseTreeTransforms.AnalyseDeclarationsTransform)
+
+ if self.outer_module_scope:
+ # inject outer module between utility code module and builtin module
+ def scope_transform(module_node):
+ module_node.scope.outer_scope = self.outer_module_scope
+ return module_node
+
+ pipeline = Pipeline.insert_into_pipeline(
+ pipeline, scope_transform,
+ before=ParseTreeTransforms.AnalyseDeclarationsTransform)
+
+ if self.context_types:
+ # inject types into module scope
+ def scope_transform(module_node):
+ for name, type in self.context_types.items():
+ entry = module_node.scope.declare_type(name, type, None, visibility='extern')
+ entry.in_cinclude = True
+ return module_node
+
+ pipeline = Pipeline.insert_into_pipeline(
+ pipeline, scope_transform,
+ before=ParseTreeTransforms.AnalyseDeclarationsTransform)
+
+ (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
+ assert not err, err
+ self.tree = tree
+ return tree
+
+ def put_code(self, output):
+ pass
+
+ @classmethod
+ def load_as_string(cls, util_code_name, from_file=None, **kwargs):
+ """
+ Load a utility code as a string. Returns (proto, implementation)
+ """
+ util = cls.load(util_code_name, from_file, **kwargs)
+ return util.proto, util.impl # keep line numbers => no lstrip()
+
+ def declare_in_scope(self, dest_scope, used=False, cython_scope=None,
+ whitelist=None):
+ """
+        Declare all entries from the utility code in dest_scope. Code will only
+        be included for used entries. If a whitelist is given, only the listed
+        entries are merged into dest_scope.
+ """
+ tree = self.get_tree(entries_only=True, cython_scope=cython_scope)
+
+ entries = tree.scope.entries
+ entries.pop('__name__')
+ entries.pop('__file__')
+ entries.pop('__builtins__')
+ entries.pop('__doc__')
+
+ for entry in entries.values():
+ entry.utility_code_definition = self
+ entry.used = used
+
+ original_scope = tree.scope
+ dest_scope.merge_in(original_scope, merge_unused=True, whitelist=whitelist)
+ tree.scope = dest_scope
+
+ for dep in self.requires:
+ if dep.is_cython_utility:
+ dep.declare_in_scope(dest_scope, cython_scope=cython_scope)
+
+ return original_scope
+
+
+def declare_declarations_in_scope(declaration_string, env, private_type=True,
+ *args, **kwargs):
+ """
+ Declare some declarations given as Cython code in declaration_string
+ in scope env.
+ """
+ CythonUtilityCode(declaration_string, *args, **kwargs).declare_in_scope(env)
diff --git a/contrib/tools/cython/Cython/Compiler/Version.py b/contrib/tools/cython/Cython/Compiler/Version.py
new file mode 100644
index 0000000000..dcb561f78c
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Version.py
@@ -0,0 +1,9 @@
+# for backwards compatibility
+
+from __future__ import absolute_import
+
+from .. import __version__ as version
+
+# For 'generated by' header line in C files.
+
+watermark = str(version)
diff --git a/contrib/tools/cython/Cython/Compiler/Visitor.pxd b/contrib/tools/cython/Cython/Compiler/Visitor.pxd
new file mode 100644
index 0000000000..d5d5692aa7
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Visitor.pxd
@@ -0,0 +1,55 @@
+from __future__ import absolute_import
+
+cimport cython
+
+cdef class TreeVisitor:
+ cdef public list access_path
+ cdef dict dispatch_table
+
+ cpdef visit(self, obj)
+ cdef _visit(self, obj)
+ cdef find_handler(self, obj)
+ cdef _visitchild(self, child, parent, attrname, idx)
+ cdef dict _visitchildren(self, parent, attrs)
+ cpdef visitchildren(self, parent, attrs=*)
+ cdef _raise_compiler_error(self, child, e)
+
+cdef class VisitorTransform(TreeVisitor):
+ cdef dict _process_children(self, parent, attrs=*)
+ cpdef visitchildren(self, parent, attrs=*, exclude=*)
+ cdef list _flatten_list(self, list orig_list)
+ cdef list _select_attrs(self, attrs, exclude)
+
+cdef class CythonTransform(VisitorTransform):
+ cdef public context
+ cdef public current_directives
+
+cdef class ScopeTrackingTransform(CythonTransform):
+ cdef public scope_type
+ cdef public scope_node
+ cdef visit_scope(self, node, scope_type)
+
+cdef class EnvTransform(CythonTransform):
+ cdef public list env_stack
+
+cdef class MethodDispatcherTransform(EnvTransform):
+ @cython.final
+ cdef _visit_binop_node(self, node)
+ @cython.final
+ cdef _find_handler(self, match_name, bint has_kwargs)
+ @cython.final
+ cdef _delegate_to_assigned_value(self, node, function, arg_list, kwargs)
+ @cython.final
+ cdef _dispatch_to_handler(self, node, function, arg_list, kwargs)
+ @cython.final
+ cdef _dispatch_to_method_handler(self, attr_name, self_arg,
+ is_unbound_method, type_name,
+ node, function, arg_list, kwargs)
+
+cdef class RecursiveNodeReplacer(VisitorTransform):
+ cdef public orig_node
+ cdef public new_node
+
+cdef class NodeFinder(TreeVisitor):
+ cdef node
+ cdef public bint found
diff --git a/contrib/tools/cython/Cython/Compiler/Visitor.py b/contrib/tools/cython/Cython/Compiler/Visitor.py
new file mode 100644
index 0000000000..a35d13e1d0
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/Visitor.py
@@ -0,0 +1,840 @@
+# cython: infer_types=True
+# cython: language_level=3
+# cython: auto_pickle=False
+
+#
+# Tree visitor and transform framework
+#
+
+from __future__ import absolute_import, print_function
+
+import sys
+import inspect
+
+from . import TypeSlots
+from . import Builtin
+from . import Nodes
+from . import ExprNodes
+from . import Errors
+from . import DebugFlags
+from . import Future
+
+import cython
+
+
+cython.declare(_PRINTABLE=tuple)
+
+if sys.version_info[0] >= 3:
+ _PRINTABLE = (bytes, str, int, float)
+else:
+ _PRINTABLE = (str, unicode, long, int, float)
+
+
+class TreeVisitor(object):
+ """
+ Base class for writing visitors for a Cython tree, contains utilities for
+ recursing such trees using visitors. Each node is
+ expected to have a child_attrs iterable containing the names of attributes
+ containing child nodes or lists of child nodes. Lists are not considered
+ part of the tree structure (i.e. contained nodes are considered direct
+ children of the parent node).
+
+    visitchildren visits each of the children of a given node (see the
+    visitchildren documentation). When recursing the tree using visitchildren, an attribute
+ access_path is maintained which gives information about the current location
+ in the tree as a stack of tuples: (parent_node, attrname, index), representing
+ the node, attribute and optional list index that was taken in each step in the path to
+ the current node.
+
+ Example:
+
+ >>> class SampleNode(object):
+ ... child_attrs = ["head", "body"]
+ ... def __init__(self, value, head=None, body=None):
+ ... self.value = value
+ ... self.head = head
+ ... self.body = body
+ ... def __repr__(self): return "SampleNode(%s)" % self.value
+ ...
+ >>> tree = SampleNode(0, SampleNode(1), [SampleNode(2), SampleNode(3)])
+ >>> class MyVisitor(TreeVisitor):
+ ... def visit_SampleNode(self, node):
+ ... print("in %s %s" % (node.value, self.access_path))
+ ... self.visitchildren(node)
+ ... print("out %s" % node.value)
+ ...
+ >>> MyVisitor().visit(tree)
+ in 0 []
+ in 1 [(SampleNode(0), 'head', None)]
+ out 1
+ in 2 [(SampleNode(0), 'body', 0)]
+ out 2
+ in 3 [(SampleNode(0), 'body', 1)]
+ out 3
+ out 0
+ """
+ def __init__(self):
+ super(TreeVisitor, self).__init__()
+ self.dispatch_table = {}
+ self.access_path = []
+
+ def dump_node(self, node):
+ ignored = list(node.child_attrs or []) + [
+ u'child_attrs', u'pos', u'gil_message', u'cpp_message', u'subexprs']
+ values = []
+ pos = getattr(node, 'pos', None)
+ if pos:
+ source = pos[0]
+ if source:
+ import os.path
+ source = os.path.basename(source.get_description())
+ values.append(u'%s:%s:%s' % (source, pos[1], pos[2]))
+ attribute_names = dir(node)
+ for attr in attribute_names:
+ if attr in ignored:
+ continue
+ if attr.startswith('_') or attr.endswith('_'):
+ continue
+ try:
+ value = getattr(node, attr)
+ except AttributeError:
+ continue
+ if value is None or value == 0:
+ continue
+ elif isinstance(value, list):
+ value = u'[...]/%d' % len(value)
+ elif not isinstance(value, _PRINTABLE):
+ continue
+ else:
+ value = repr(value)
+ values.append(u'%s = %s' % (attr, value))
+ return u'%s(%s)' % (node.__class__.__name__, u',\n '.join(values))
+
+ def _find_node_path(self, stacktrace):
+ import os.path
+ last_traceback = stacktrace
+ nodes = []
+ while hasattr(stacktrace, 'tb_frame'):
+ frame = stacktrace.tb_frame
+ node = frame.f_locals.get(u'self')
+ if isinstance(node, Nodes.Node):
+ code = frame.f_code
+ method_name = code.co_name
+ pos = (os.path.basename(code.co_filename),
+ frame.f_lineno)
+ nodes.append((node, method_name, pos))
+ last_traceback = stacktrace
+ stacktrace = stacktrace.tb_next
+ return (last_traceback, nodes)
+
+ def _raise_compiler_error(self, child, e):
+ trace = ['']
+ for parent, attribute, index in self.access_path:
+ node = getattr(parent, attribute)
+ if index is None:
+ index = ''
+ else:
+ node = node[index]
+ index = u'[%d]' % index
+ trace.append(u'%s.%s%s = %s' % (
+ parent.__class__.__name__, attribute, index,
+ self.dump_node(node)))
+ stacktrace, called_nodes = self._find_node_path(sys.exc_info()[2])
+ last_node = child
+ for node, method_name, pos in called_nodes:
+ last_node = node
+ trace.append(u"File '%s', line %d, in %s: %s" % (
+ pos[0], pos[1], method_name, self.dump_node(node)))
+ raise Errors.CompilerCrash(
+ getattr(last_node, 'pos', None), self.__class__.__name__,
+ u'\n'.join(trace), e, stacktrace)
+
+ @cython.final
+ def find_handler(self, obj):
+ # to resolve, try entire hierarchy
+ cls = type(obj)
+ pattern = "visit_%s"
+ mro = inspect.getmro(cls)
+ for mro_cls in mro:
+ handler_method = getattr(self, pattern % mro_cls.__name__, None)
+ if handler_method is not None:
+ return handler_method
+ print(type(self), cls)
+ if self.access_path:
+ print(self.access_path)
+ print(self.access_path[-1][0].pos)
+ print(self.access_path[-1][0].__dict__)
+ raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
+
+ def visit(self, obj):
+ return self._visit(obj)
+
+ @cython.final
+ def _visit(self, obj):
+ try:
+ try:
+ handler_method = self.dispatch_table[type(obj)]
+ except KeyError:
+ handler_method = self.find_handler(obj)
+ self.dispatch_table[type(obj)] = handler_method
+ return handler_method(obj)
+ except Errors.CompileError:
+ raise
+ except Errors.AbortError:
+ raise
+ except Exception as e:
+ if DebugFlags.debug_no_exception_intercept:
+ raise
+ self._raise_compiler_error(obj, e)
+
+ @cython.final
+ def _visitchild(self, child, parent, attrname, idx):
+ self.access_path.append((parent, attrname, idx))
+ result = self._visit(child)
+ self.access_path.pop()
+ return result
+
+ def visitchildren(self, parent, attrs=None):
+ return self._visitchildren(parent, attrs)
+
+ @cython.final
+ @cython.locals(idx=cython.Py_ssize_t)
+ def _visitchildren(self, parent, attrs):
+ """
+ Visits the children of the given parent. If parent is None, returns
+ immediately (returning None).
+
+ The return value is a dictionary giving the results for each
+ child (mapping the attribute name to either the return value
+ or a list of return values (in the case of multiple children
+ in an attribute)).
+ """
+ if parent is None: return None
+ result = {}
+ for attr in parent.child_attrs:
+ if attrs is not None and attr not in attrs: continue
+ child = getattr(parent, attr)
+ if child is not None:
+ if type(child) is list:
+ childretval = [self._visitchild(x, parent, attr, idx) for idx, x in enumerate(child)]
+ else:
+ childretval = self._visitchild(child, parent, attr, None)
+ assert not isinstance(childretval, list), 'Cannot insert list here: %s in %r' % (attr, parent)
+ result[attr] = childretval
+ return result
+
+
+class VisitorTransform(TreeVisitor):
+ """
+    A tree transform is a base class for visitors that want to do stream
+ processing of the structure (rather than attributes etc.) of a tree.
+
+ It implements __call__ to simply visit the argument node.
+
+ It requires the visitor methods to return the nodes which should take
+    the place of the visited node in the result tree (which can be the same
+    node, or one or more replacements). Specifically, if the return value from
+ a visitor method is:
+
+ - [] or None; the visited node will be removed (set to None if an attribute and
+ removed if in a list)
+ - A single node; the visited node will be replaced by the returned node.
+ - A list of nodes; the visited nodes will be replaced by all the nodes in the
+ list. This will only work if the node was already a member of a list; if it
+ was not, an exception will be raised. (Typically you want to ensure that you
+ are within a StatListNode or similar before doing this.)
+ """
+ def visitchildren(self, parent, attrs=None, exclude=None):
+ # generic def entry point for calls from Python subclasses
+ if exclude is not None:
+ attrs = self._select_attrs(parent.child_attrs if attrs is None else attrs, exclude)
+ return self._process_children(parent, attrs)
+
+ @cython.final
+ def _select_attrs(self, attrs, exclude):
+ return [name for name in attrs if name not in exclude]
+
+ @cython.final
+ def _process_children(self, parent, attrs=None):
+ # fast cdef entry point for calls from Cython subclasses
+ result = self._visitchildren(parent, attrs)
+ for attr, newnode in result.items():
+ if type(newnode) is list:
+ newnode = self._flatten_list(newnode)
+ setattr(parent, attr, newnode)
+ return result
+
+ @cython.final
+ def _flatten_list(self, orig_list):
+ # Flatten the list one level and remove any None
+ newlist = []
+ for x in orig_list:
+ if x is not None:
+ if type(x) is list:
+ newlist.extend(x)
+ else:
+ newlist.append(x)
+ return newlist
+
+ def recurse_to_children(self, node):
+ self._process_children(node)
+ return node
+
+ def __call__(self, root):
+ return self._visit(root)
+
+
+class CythonTransform(VisitorTransform):
+ """
+ Certain common conventions and utilities for Cython transforms.
+
+ - Sets up the context of the pipeline in self.context
+ - Tracks directives in effect in self.current_directives
+ """
+ def __init__(self, context):
+ super(CythonTransform, self).__init__()
+ self.context = context
+
+ def __call__(self, node):
+ from . import ModuleNode
+ if isinstance(node, ModuleNode.ModuleNode):
+ self.current_directives = node.directives
+ return super(CythonTransform, self).__call__(node)
+
+ def visit_CompilerDirectivesNode(self, node):
+ old = self.current_directives
+ self.current_directives = node.directives
+ self._process_children(node)
+ self.current_directives = old
+ return node
+
+ def visit_Node(self, node):
+ self._process_children(node)
+ return node
+
+
+class ScopeTrackingTransform(CythonTransform):
+    # Keeps track of the type of the current scope.
+    # scope_type: one of 'module', 'function', 'cclass', 'pyclass', 'struct'
+    # scope_node: the node that owns the current scope
+
+ def visit_ModuleNode(self, node):
+ self.scope_type = 'module'
+ self.scope_node = node
+ self._process_children(node)
+ return node
+
+ def visit_scope(self, node, scope_type):
+ prev = self.scope_type, self.scope_node
+ self.scope_type = scope_type
+ self.scope_node = node
+ self._process_children(node)
+ self.scope_type, self.scope_node = prev
+ return node
+
+ def visit_CClassDefNode(self, node):
+ return self.visit_scope(node, 'cclass')
+
+ def visit_PyClassDefNode(self, node):
+ return self.visit_scope(node, 'pyclass')
+
+ def visit_FuncDefNode(self, node):
+ return self.visit_scope(node, 'function')
+
+ def visit_CStructOrUnionDefNode(self, node):
+ return self.visit_scope(node, 'struct')
+
+
+class EnvTransform(CythonTransform):
+ """
+ This transformation keeps a stack of the environments.
+ """
+ def __call__(self, root):
+ self.env_stack = []
+ self.enter_scope(root, root.scope)
+ return super(EnvTransform, self).__call__(root)
+
+ def current_env(self):
+ return self.env_stack[-1][1]
+
+ def current_scope_node(self):
+ return self.env_stack[-1][0]
+
+ def global_scope(self):
+ return self.current_env().global_scope()
+
+ def enter_scope(self, node, scope):
+ self.env_stack.append((node, scope))
+
+ def exit_scope(self):
+ self.env_stack.pop()
+
+ def visit_FuncDefNode(self, node):
+ self.enter_scope(node, node.local_scope)
+ self._process_children(node)
+ self.exit_scope()
+ return node
+
+ def visit_GeneratorBodyDefNode(self, node):
+ self._process_children(node)
+ return node
+
+ def visit_ClassDefNode(self, node):
+ self.enter_scope(node, node.scope)
+ self._process_children(node)
+ self.exit_scope()
+ return node
+
+ def visit_CStructOrUnionDefNode(self, node):
+ self.enter_scope(node, node.scope)
+ self._process_children(node)
+ self.exit_scope()
+ return node
+
+ def visit_ScopedExprNode(self, node):
+ if node.expr_scope:
+ self.enter_scope(node, node.expr_scope)
+ self._process_children(node)
+ self.exit_scope()
+ else:
+ self._process_children(node)
+ return node
+
+ def visit_CArgDeclNode(self, node):
+ # default arguments are evaluated in the outer scope
+ if node.default:
+ attrs = [attr for attr in node.child_attrs if attr != 'default']
+ self._process_children(node, attrs)
+ self.enter_scope(node, self.current_env().outer_scope)
+ self.visitchildren(node, ('default',))
+ self.exit_scope()
+ else:
+ self._process_children(node)
+ return node
+
+
+class NodeRefCleanupMixin(object):
+ """
+ Clean up references to nodes that were replaced.
+
+ NOTE: this implementation assumes that the replacement is
+ done first, before hitting any further references during
+ normal tree traversal. This needs to be arranged by calling
+ "self.visitchildren()" at a proper place in the transform
+ and by ordering the "child_attrs" of nodes appropriately.
+ """
+ def __init__(self, *args):
+ super(NodeRefCleanupMixin, self).__init__(*args)
+ self._replacements = {}
+
+ def visit_CloneNode(self, node):
+ arg = node.arg
+ if arg not in self._replacements:
+ self.visitchildren(arg)
+ node.arg = self._replacements.get(arg, arg)
+ return node
+
+ def visit_ResultRefNode(self, node):
+ expr = node.expression
+ if expr is None or expr not in self._replacements:
+ self.visitchildren(node)
+ expr = node.expression
+ if expr is not None:
+ node.expression = self._replacements.get(expr, expr)
+ return node
+
+ def replace(self, node, replacement):
+ self._replacements[node] = replacement
+ return replacement
+
+
+find_special_method_for_binary_operator = {
+ '<': '__lt__',
+ '<=': '__le__',
+ '==': '__eq__',
+ '!=': '__ne__',
+ '>=': '__ge__',
+ '>': '__gt__',
+ '+': '__add__',
+ '&': '__and__',
+ '/': '__div__',
+ '//': '__floordiv__',
+ '<<': '__lshift__',
+ '%': '__mod__',
+ '*': '__mul__',
+ '|': '__or__',
+ '**': '__pow__',
+ '>>': '__rshift__',
+ '-': '__sub__',
+ '^': '__xor__',
+ 'in': '__contains__',
+}.get
+
+
+find_special_method_for_unary_operator = {
+ 'not': '__not__',
+ '~': '__inv__',
+ '-': '__neg__',
+ '+': '__pos__',
+}.get
+
+
+class MethodDispatcherTransform(EnvTransform):
+ """
+ Base class for transformations that want to intercept on specific
+ builtin functions or methods of builtin types, including special
+ methods triggered by Python operators. Must run after declaration
+ analysis when entries were assigned.
+
+ Naming pattern for handler methods is as follows:
+
+ * builtin functions: _handle_(general|simple|any)_function_NAME
+
+ * builtin methods: _handle_(general|simple|any)_method_TYPENAME_METHODNAME
+ """
+ # only visit call nodes and Python operations
+ def visit_GeneralCallNode(self, node):
+ self._process_children(node)
+ function = node.function
+ if not function.type.is_pyobject:
+ return node
+ arg_tuple = node.positional_args
+ if not isinstance(arg_tuple, ExprNodes.TupleNode):
+ return node
+ keyword_args = node.keyword_args
+ if keyword_args and not isinstance(keyword_args, ExprNodes.DictNode):
+ # can't handle **kwargs
+ return node
+ args = arg_tuple.args
+ return self._dispatch_to_handler(node, function, args, keyword_args)
+
+ def visit_SimpleCallNode(self, node):
+ self._process_children(node)
+ function = node.function
+ if function.type.is_pyobject:
+ arg_tuple = node.arg_tuple
+ if not isinstance(arg_tuple, ExprNodes.TupleNode):
+ return node
+ args = arg_tuple.args
+ else:
+ args = node.args
+ return self._dispatch_to_handler(node, function, args, None)
+
+ def visit_PrimaryCmpNode(self, node):
+ if node.cascade:
+ # not currently handled below
+ self._process_children(node)
+ return node
+ return self._visit_binop_node(node)
+
+ def visit_BinopNode(self, node):
+ return self._visit_binop_node(node)
+
+ def _visit_binop_node(self, node):
+ self._process_children(node)
+ # FIXME: could special case 'not_in'
+ special_method_name = find_special_method_for_binary_operator(node.operator)
+ if special_method_name:
+ operand1, operand2 = node.operand1, node.operand2
+ if special_method_name == '__contains__':
+ operand1, operand2 = operand2, operand1
+ elif special_method_name == '__div__':
+ if Future.division in self.current_env().global_scope().context.future_directives:
+ special_method_name = '__truediv__'
+ obj_type = operand1.type
+ if obj_type.is_builtin_type:
+ type_name = obj_type.name
+ else:
+ type_name = "object" # safety measure
+ node = self._dispatch_to_method_handler(
+ special_method_name, None, False, type_name,
+ node, None, [operand1, operand2], None)
+ return node
+
+ def visit_UnopNode(self, node):
+ self._process_children(node)
+ special_method_name = find_special_method_for_unary_operator(node.operator)
+ if special_method_name:
+ operand = node.operand
+ obj_type = operand.type
+ if obj_type.is_builtin_type:
+ type_name = obj_type.name
+ else:
+ type_name = "object" # safety measure
+ node = self._dispatch_to_method_handler(
+ special_method_name, None, False, type_name,
+ node, None, [operand], None)
+ return node
+
+ ### dispatch to specific handlers
+
+ def _find_handler(self, match_name, has_kwargs):
+        call_type = 'general' if has_kwargs else 'simple'
+ handler = getattr(self, '_handle_%s_%s' % (call_type, match_name), None)
+ if handler is None:
+ handler = getattr(self, '_handle_any_%s' % match_name, None)
+ return handler
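+    # E.g. match_name 'function_len' resolves to '_handle_simple_function_len'
+    # (or '_handle_general_function_len' when keyword arguments are present),
+    # falling back to '_handle_any_function_len' if no specific handler exists.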
+
+ def _delegate_to_assigned_value(self, node, function, arg_list, kwargs):
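+        # Single-assignment case, e.g. "f = len" followed by "f(x)": look
+        # through the assignment so the call can be dispatched as "len(x)".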
+ assignment = function.cf_state[0]
+ value = assignment.rhs
+ if value.is_name:
+ if not value.entry or len(value.entry.cf_assignments) > 1:
+ # the variable might have been reassigned => play safe
+ return node
+ elif value.is_attribute and value.obj.is_name:
+ if not value.obj.entry or len(value.obj.entry.cf_assignments) > 1:
+ # the underlying variable might have been reassigned => play safe
+ return node
+ else:
+ return node
+ return self._dispatch_to_handler(
+ node, value, arg_list, kwargs)
+
+ def _dispatch_to_handler(self, node, function, arg_list, kwargs):
+ if function.is_name:
+            # we only consider functions that are either builtin
+            # Python functions or builtins that have already been
+            # replaced by a C function call (defined in the builtin scope)
+ if not function.entry:
+ return node
+ entry = function.entry
+ is_builtin = (
+ entry.is_builtin or
+ entry is self.current_env().builtin_scope().lookup_here(function.name))
+ if not is_builtin:
+ if function.cf_state and function.cf_state.is_single:
+ # we know the value of the variable
+ # => see if it's usable instead
+ return self._delegate_to_assigned_value(
+ node, function, arg_list, kwargs)
+ if arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type:
+ if entry.scope.parent_type is arg_list[0].type:
+ # Optimised (unbound) method of a builtin type => try to "de-optimise".
+ return self._dispatch_to_method_handler(
+ entry.name, self_arg=None, is_unbound_method=True,
+ type_name=entry.scope.parent_type.name,
+ node=node, function=function, arg_list=arg_list, kwargs=kwargs)
+ return node
+ function_handler = self._find_handler(
+ "function_%s" % function.name, kwargs)
+ if function_handler is None:
+ return self._handle_function(node, function.name, function, arg_list, kwargs)
+ if kwargs:
+ return function_handler(node, function, arg_list, kwargs)
+ else:
+ return function_handler(node, function, arg_list)
+ elif function.is_attribute:
+ attr_name = function.attribute
+ if function.type.is_pyobject:
+ self_arg = function.obj
+ elif node.self and function.entry:
+ entry = function.entry.as_variable
+ if not entry or not entry.is_builtin:
+ return node
+ # C implementation of a Python builtin method - see if we find further matches
+ self_arg = node.self
+ arg_list = arg_list[1:] # drop CloneNode of self argument
+ else:
+ return node
+ obj_type = self_arg.type
+ is_unbound_method = False
+ if obj_type.is_builtin_type:
+ if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject:
+ # calling an unbound method like 'list.append(L,x)'
+ # (ignoring 'type.mro()' here ...)
+ type_name = self_arg.name
+ self_arg = None
+ is_unbound_method = True
+ else:
+ type_name = obj_type.name
+ else:
+ type_name = "object" # safety measure
+ return self._dispatch_to_method_handler(
+ attr_name, self_arg, is_unbound_method, type_name,
+ node, function, arg_list, kwargs)
+ else:
+ return node
+
+ def _dispatch_to_method_handler(self, attr_name, self_arg,
+ is_unbound_method, type_name,
+ node, function, arg_list, kwargs):
+ method_handler = self._find_handler(
+ "method_%s_%s" % (type_name, attr_name), kwargs)
+ if method_handler is None:
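+            # fall back to a slot handler: e.g. attr_name '__eq__' looks up
+            # '_handle_simple_slot__eq__' via the "slot%s" pattern below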
+ if (attr_name in TypeSlots.method_name_to_slot
+ or attr_name == '__new__'):
+ method_handler = self._find_handler(
+ "slot%s" % attr_name, kwargs)
+ if method_handler is None:
+ return self._handle_method(
+ node, type_name, attr_name, function,
+ arg_list, is_unbound_method, kwargs)
+ if self_arg is not None:
+ arg_list = [self_arg] + list(arg_list)
+ if kwargs:
+ result = method_handler(
+ node, function, arg_list, is_unbound_method, kwargs)
+ else:
+ result = method_handler(
+ node, function, arg_list, is_unbound_method)
+ return result
+
+ def _handle_function(self, node, function_name, function, arg_list, kwargs):
+ """Fallback handler"""
+ return node
+
+ def _handle_method(self, node, type_name, attr_name, function,
+ arg_list, is_unbound_method, kwargs):
+ """Fallback handler"""
+ return node
+
+
+class RecursiveNodeReplacer(VisitorTransform):
+ """
+    Recursively replace all occurrences of a node in a subtree with
+    another node.
+ """
+ def __init__(self, orig_node, new_node):
+ super(RecursiveNodeReplacer, self).__init__()
+ self.orig_node, self.new_node = orig_node, new_node
+
+ def visit_CloneNode(self, node):
+ if node is self.orig_node:
+ return self.new_node
+ if node.arg is self.orig_node:
+ node.arg = self.new_node
+ return node
+
+ def visit_Node(self, node):
+ self._process_children(node)
+ if node is self.orig_node:
+ return self.new_node
+ else:
+ return node
+
+def recursively_replace_node(tree, old_node, new_node):
+ replace_in = RecursiveNodeReplacer(old_node, new_node)
+ replace_in(tree)
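+
+# Illustrative usage (with hypothetical node variables):
+#
+#     recursively_replace_node(module_body, old_expr, new_expr)
+#
+# rewrites every occurrence of old_expr inside module_body in place.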
+
+
+class NodeFinder(TreeVisitor):
+ """
+ Find out if a node appears in a subtree.
+ """
+ def __init__(self, node):
+ super(NodeFinder, self).__init__()
+ self.node = node
+ self.found = False
+
+ def visit_Node(self, node):
+ if self.found:
+ pass # short-circuit
+ elif node is self.node:
+ self.found = True
+ else:
+ self._visitchildren(node, None)
+
+def tree_contains(tree, node):
+ finder = NodeFinder(node)
+ finder.visit(tree)
+ return finder.found
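+
+# e.g. tree_contains(func_body, name_node) returns True if name_node occurs
+# anywhere in func_body (hypothetical variables, for illustration)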
+
+
+# Utils
+def replace_node(ptr, value):
+ """Replaces a node. ptr is of the form used on the access path stack
+ (parent, attrname, listidx|None)
+ """
+ parent, attrname, listidx = ptr
+ if listidx is None:
+ setattr(parent, attrname, value)
+ else:
+ getattr(parent, attrname)[listidx] = value
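+
+# Illustrative usage (hypothetical values): an access-path entry such as
+#     (if_node, 'body', None)   - plain attribute slot
+#     (stats_node, 'stats', 3)  - entry 3 of a list attribute
+# passed as 'ptr' makes replace_node() assign 'value' into that slot.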
+
+
+class PrintTree(TreeVisitor):
+ """Prints a representation of the tree to standard output.
+ Subclass and override repr_of to provide more information
+ about nodes. """
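+    # Illustrative usage: PrintTree(start=10, end=20)(tree, phase='analysis')
+    # prints only nodes whose source line falls between 10 and 20.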
+ def __init__(self, start=None, end=None):
+ TreeVisitor.__init__(self)
+ self._indent = ""
+ if start is not None or end is not None:
+ self._line_range = (start or 0, end or 2**30)
+ else:
+ self._line_range = None
+
+ def indent(self):
+ self._indent += " "
+
+ def unindent(self):
+ self._indent = self._indent[:-2]
+
+ def __call__(self, tree, phase=None):
+ print("Parse tree dump at phase '%s'" % phase)
+ self.visit(tree)
+ return tree
+
+    # Don't do anything about process_list; the default gives
+    # nice-looking name[idx] nodes which visually appear under
+    # the parent node, without displaying the list itself in
+    # the hierarchy.
+ def visit_Node(self, node):
+ self._print_node(node)
+ self.indent()
+ self.visitchildren(node)
+ self.unindent()
+ return node
+
+ def visit_CloneNode(self, node):
+ self._print_node(node)
+ self.indent()
+ line = node.pos[1]
+ if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]:
+ print("%s- %s: %s" % (self._indent, 'arg', self.repr_of(node.arg)))
+ self.indent()
+ self.visitchildren(node.arg)
+ self.unindent()
+ self.unindent()
+ return node
+
+ def _print_node(self, node):
+ line = node.pos[1]
+ if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]:
+ if len(self.access_path) == 0:
+ name = "(root)"
+ else:
+ parent, attr, idx = self.access_path[-1]
+ if idx is not None:
+ name = "%s[%d]" % (attr, idx)
+ else:
+ name = attr
+ print("%s- %s: %s" % (self._indent, name, self.repr_of(node)))
+
+ def repr_of(self, node):
+ if node is None:
+ return "(none)"
+ else:
+ result = node.__class__.__name__
+ if isinstance(node, ExprNodes.NameNode):
+ result += "(type=%s, name=\"%s\")" % (repr(node.type), node.name)
+ elif isinstance(node, Nodes.DefNode):
+ result += "(name=\"%s\")" % node.name
+ elif isinstance(node, ExprNodes.ExprNode):
+ t = node.type
+ result += "(type=%s)" % repr(t)
+ elif node.pos:
+ pos = node.pos
+ path = pos[0].get_description()
+ if '/' in path:
+ path = path.split('/')[-1]
+ if '\\' in path:
+ path = path.split('\\')[-1]
+ result += "(pos=(%s:%s:%s))" % (path, pos[1], pos[2])
+
+ return result
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/contrib/tools/cython/Cython/Compiler/__init__.py b/contrib/tools/cython/Cython/Compiler/__init__.py
new file mode 100644
index 0000000000..fa81adaff6
--- /dev/null
+++ b/contrib/tools/cython/Cython/Compiler/__init__.py
@@ -0,0 +1 @@
+# empty file