aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/tools/cython/Cython/Compiler
diff options
context:
space:
mode:
authorAleksandr <ivansduck@gmail.com>2022-02-10 16:47:52 +0300
committerDaniil Cherednik <dcherednik@yandex-team.ru>2022-02-10 16:47:52 +0300
commitea6c5b7f172becca389cacaff7d5f45f6adccbe6 (patch)
treed16cef493ac1e092b4a03ab9437ec06ffe3d188f /contrib/tools/cython/Cython/Compiler
parent37de222addabbef336dcaaea5f7c7645a629fc6d (diff)
downloadydb-ea6c5b7f172becca389cacaff7d5f45f6adccbe6.tar.gz
Restoring authorship annotation for Aleksandr <ivansduck@gmail.com>. Commit 1 of 2.
Diffstat (limited to 'contrib/tools/cython/Cython/Compiler')
-rw-r--r--contrib/tools/cython/Cython/Compiler/Annotate.py22
-rw-r--r--contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py94
-rw-r--r--contrib/tools/cython/Cython/Compiler/Buffer.py56
-rw-r--r--contrib/tools/cython/Cython/Compiler/Builtin.py82
-rw-r--r--contrib/tools/cython/Cython/Compiler/CmdLine.py8
-rw-r--r--contrib/tools/cython/Cython/Compiler/Code.pxd40
-rw-r--r--contrib/tools/cython/Cython/Compiler/Code.py408
-rw-r--r--contrib/tools/cython/Cython/Compiler/CodeGeneration.py2
-rw-r--r--contrib/tools/cython/Cython/Compiler/CythonScope.py8
-rw-r--r--contrib/tools/cython/Cython/Compiler/Errors.py36
-rw-r--r--contrib/tools/cython/Cython/Compiler/ExprNodes.py1764
-rw-r--r--contrib/tools/cython/Cython/Compiler/FusedNode.py218
-rw-r--r--contrib/tools/cython/Cython/Compiler/Main.py82
-rw-r--r--contrib/tools/cython/Cython/Compiler/MemoryView.py30
-rw-r--r--contrib/tools/cython/Cython/Compiler/ModuleNode.py860
-rw-r--r--contrib/tools/cython/Cython/Compiler/Naming.py10
-rw-r--r--contrib/tools/cython/Cython/Compiler/Nodes.py1554
-rw-r--r--contrib/tools/cython/Cython/Compiler/Optimize.py952
-rw-r--r--contrib/tools/cython/Cython/Compiler/Options.py38
-rw-r--r--contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd24
-rw-r--r--contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py666
-rw-r--r--contrib/tools/cython/Cython/Compiler/Parsing.pxd6
-rw-r--r--contrib/tools/cython/Cython/Compiler/Parsing.py438
-rw-r--r--contrib/tools/cython/Cython/Compiler/Pipeline.py40
-rw-r--r--contrib/tools/cython/Cython/Compiler/PyrexTypes.py896
-rw-r--r--contrib/tools/cython/Cython/Compiler/Pythran.py328
-rw-r--r--contrib/tools/cython/Cython/Compiler/Scanning.py40
-rw-r--r--contrib/tools/cython/Cython/Compiler/StringEncoding.py18
-rw-r--r--contrib/tools/cython/Cython/Compiler/Symtab.py518
-rw-r--r--contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py2
-rw-r--r--contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py2
-rw-r--r--contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py72
-rw-r--r--contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py38
-rw-r--r--contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py20
-rw-r--r--contrib/tools/cython/Cython/Compiler/TreeFragment.py8
-rw-r--r--contrib/tools/cython/Cython/Compiler/TreePath.py4
-rw-r--r--contrib/tools/cython/Cython/Compiler/TypeInference.py66
-rw-r--r--contrib/tools/cython/Cython/Compiler/TypeSlots.py82
-rw-r--r--contrib/tools/cython/Cython/Compiler/UtilNodes.py8
-rw-r--r--contrib/tools/cython/Cython/Compiler/UtilityCode.py48
-rw-r--r--contrib/tools/cython/Cython/Compiler/Visitor.py22
41 files changed, 4805 insertions, 4805 deletions
diff --git a/contrib/tools/cython/Cython/Compiler/Annotate.py b/contrib/tools/cython/Cython/Compiler/Annotate.py
index 2ea38c00c7..51617cee03 100644
--- a/contrib/tools/cython/Cython/Compiler/Annotate.py
+++ b/contrib/tools/cython/Cython/Compiler/Annotate.py
@@ -109,14 +109,14 @@ class AnnotationCCodeWriter(CCodeWriter):
.cython.code .c_call { color: #0000FF; }
""")
- # on-click toggle function to show/hide C source code
- _onclick_attr = ' onclick="{0}"'.format((
- "(function(s){"
- " s.display = s.display === 'block' ? 'none' : 'block'"
- "})(this.nextElementSibling.style)"
- ).replace(' ', '') # poor dev's JS minification
- )
-
+ # on-click toggle function to show/hide C source code
+ _onclick_attr = ' onclick="{0}"'.format((
+ "(function(s){"
+ " s.display = s.display === 'block' ? 'none' : 'block'"
+ "})(this.nextElementSibling.style)"
+ ).replace(' ', '') # poor dev's JS minification
+ )
+
def save_annotation(self, source_filename, target_filename, coverage_xml=None):
with Utils.open_source_file(source_filename) as f:
code = f.read()
@@ -151,7 +151,7 @@ class AnnotationCCodeWriter(CCodeWriter):
<span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
</p>
- ''').format(css=self._css(), watermark=Version.watermark,
+ ''').format(css=self._css(), watermark=Version.watermark,
filename=os.path.basename(source_filename) if source_filename else '',
more_info=coverage_info)
]
@@ -253,7 +253,7 @@ class AnnotationCCodeWriter(CCodeWriter):
calls['py_macro_api'] + calls['pyx_macro_api'])
if c_code:
- onclick = self._onclick_attr
+ onclick = self._onclick_attr
expandsymbol = '+'
else:
onclick = ''
@@ -294,7 +294,7 @@ _parse_code = re.compile((
br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|'
br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
br')(?=\()|' # look-ahead to exclude subsequent '(' from replacement
- br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
+ br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
).decode('ascii')).sub
diff --git a/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py
index d3c0a1d0da..b18d42030a 100644
--- a/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py
+++ b/contrib/tools/cython/Cython/Compiler/AutoDocTransforms.py
@@ -1,22 +1,22 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function
from .Visitor import CythonTransform
from .StringEncoding import EncodedString
from . import Options
from . import PyrexTypes, ExprNodes
-from ..CodeWriter import ExpressionWriter
-
-
-class AnnotationWriter(ExpressionWriter):
-
- def visit_Node(self, node):
- self.put(u"<???>")
-
- def visit_LambdaNode(self, node):
- # XXX Should we do better?
- self.put("<lambda>")
-
-
+from ..CodeWriter import ExpressionWriter
+
+
+class AnnotationWriter(ExpressionWriter):
+
+ def visit_Node(self, node):
+ self.put(u"<???>")
+
+ def visit_LambdaNode(self, node):
+ # XXX Should we do better?
+ self.put("<lambda>")
+
+
class EmbedSignature(CythonTransform):
def __init__(self, context):
@@ -24,10 +24,10 @@ class EmbedSignature(CythonTransform):
self.class_name = None
self.class_node = None
- def _fmt_expr(self, node):
- writer = AnnotationWriter()
- result = writer.write(node)
- # print(type(node).__name__, '-->', result)
+ def _fmt_expr(self, node):
+ writer = AnnotationWriter()
+ result = writer.write(node)
+ # print(type(node).__name__, '-->', result)
return result
def _fmt_arg(self, arg):
@@ -35,25 +35,25 @@ class EmbedSignature(CythonTransform):
doc = arg.name
else:
doc = arg.type.declaration_code(arg.name, for_display=1)
-
- if arg.annotation:
- annotation = self._fmt_expr(arg.annotation)
- doc = doc + (': %s' % annotation)
- if arg.default:
- default = self._fmt_expr(arg.default)
- doc = doc + (' = %s' % default)
- elif arg.default:
- default = self._fmt_expr(arg.default)
- doc = doc + ('=%s' % default)
+
+ if arg.annotation:
+ annotation = self._fmt_expr(arg.annotation)
+ doc = doc + (': %s' % annotation)
+ if arg.default:
+ default = self._fmt_expr(arg.default)
+ doc = doc + (' = %s' % default)
+ elif arg.default:
+ default = self._fmt_expr(arg.default)
+ doc = doc + ('=%s' % default)
return doc
- def _fmt_star_arg(self, arg):
- arg_doc = arg.name
- if arg.annotation:
- annotation = self._fmt_expr(arg.annotation)
- arg_doc = arg_doc + (': %s' % annotation)
- return arg_doc
-
+ def _fmt_star_arg(self, arg):
+ arg_doc = arg.name
+ if arg.annotation:
+ annotation = self._fmt_expr(arg.annotation)
+ arg_doc = arg_doc + (': %s' % annotation)
+ return arg_doc
+
def _fmt_arglist(self, args,
npargs=0, pargs=None,
nkargs=0, kargs=None,
@@ -64,13 +64,13 @@ class EmbedSignature(CythonTransform):
arg_doc = self._fmt_arg(arg)
arglist.append(arg_doc)
if pargs:
- arg_doc = self._fmt_star_arg(pargs)
- arglist.insert(npargs, '*%s' % arg_doc)
+ arg_doc = self._fmt_star_arg(pargs)
+ arglist.insert(npargs, '*%s' % arg_doc)
elif nkargs:
arglist.insert(npargs, '*')
if kargs:
- arg_doc = self._fmt_star_arg(kargs)
- arglist.append('**%s' % arg_doc)
+ arg_doc = self._fmt_star_arg(kargs)
+ arglist.append('**%s' % arg_doc)
return arglist
def _fmt_ret_type(self, ret):
@@ -82,7 +82,7 @@ class EmbedSignature(CythonTransform):
def _fmt_signature(self, cls_name, func_name, args,
npargs=0, pargs=None,
nkargs=0, kargs=None,
- return_expr=None,
+ return_expr=None,
return_type=None, hide_self=False):
arglist = self._fmt_arglist(args,
npargs, pargs,
@@ -92,13 +92,13 @@ class EmbedSignature(CythonTransform):
func_doc = '%s(%s)' % (func_name, arglist_doc)
if cls_name:
func_doc = '%s.%s' % (cls_name, func_doc)
- ret_doc = None
- if return_expr:
- ret_doc = self._fmt_expr(return_expr)
- elif return_type:
+ ret_doc = None
+ if return_expr:
+ ret_doc = self._fmt_expr(return_expr)
+ elif return_type:
ret_doc = self._fmt_ret_type(return_type)
- if ret_doc:
- func_doc = '%s -> %s' % (func_doc, ret_doc)
+ if ret_doc:
+ func_doc = '%s -> %s' % (func_doc, ret_doc)
return func_doc
def _embed_signature(self, signature, node_doc):
@@ -153,7 +153,7 @@ class EmbedSignature(CythonTransform):
class_name, func_name, node.args,
npargs, node.star_arg,
nkargs, node.starstar_arg,
- return_expr=node.return_type_annotation,
+ return_expr=node.return_type_annotation,
return_type=None, hide_self=hide_self)
if signature:
if is_constructor:
diff --git a/contrib/tools/cython/Cython/Compiler/Buffer.py b/contrib/tools/cython/Cython/Compiler/Buffer.py
index c62a24f568..f8c70b156b 100644
--- a/contrib/tools/cython/Cython/Compiler/Buffer.py
+++ b/contrib/tools/cython/Cython/Compiler/Buffer.py
@@ -316,7 +316,7 @@ def put_init_vars(entry, code):
code.putln("%s.data = NULL;" % pybuffernd_struct)
code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
-
+
def put_acquire_arg_buffer(entry, code, pos):
buffer_aux = entry.buffer_aux
getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
@@ -326,16 +326,16 @@ def put_acquire_arg_buffer(entry, code, pos):
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
- # An exception raised in arg parsing cannot be caught, so no
+ # An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(entry, code)
-
+
def put_release_buffer_code(code, entry):
code.globalstate.use_utility_code(acquire_utility_code)
code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
-
+
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
ndim = buffer_type.ndim
cast = int(buffer_type.cast)
@@ -344,12 +344,12 @@ def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
- code.globalstate.use_utility_code(acquire_utility_code)
+ code.globalstate.use_utility_code(acquire_utility_code)
return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
"%(cast)d, __pyx_stack)" % locals())
-
+
def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
is_initialized, pos, code):
"""
@@ -370,7 +370,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
- code.putln("{") # Set up necessary stack for getbuffer
+ code.putln("{") # Set up necessary stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
@@ -386,18 +386,18 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
# before raising the exception. A failure of reacquisition
# will cause the reacquisition exception to be reported, one
# can consider working around this later.
- exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
- for _ in range(3))
- code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
+ exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
+ for _ in range(3))
+ code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
- code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
+ code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
code.globalstate.use_utility_code(raise_buffer_fallback_code)
code.putln('__Pyx_RaiseBufferFallbackError();')
code.putln('} else {')
- code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
- code.putln('}')
- code.putln('%s = %s = %s = 0;' % exc_temps)
- for t in exc_temps:
+ code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
+ code.putln('}')
+ code.putln('%s = %s = %s = 0;' % exc_temps)
+ for t in exc_temps:
code.funcstate.release_temp(t)
code.putln('}')
# Unpack indices
@@ -512,7 +512,7 @@ def buf_lookup_full_code(proto, defin, name, nd):
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
-
+
def buf_lookup_strided_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
@@ -523,7 +523,7 @@ def buf_lookup_strided_code(proto, defin, name, nd):
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
-
+
def buf_lookup_c_code(proto, defin, name, nd):
"""
Similar to strided lookup, but can assume that the last dimension
@@ -537,7 +537,7 @@ def buf_lookup_c_code(proto, defin, name, nd):
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
-
+
def buf_lookup_fortran_code(proto, defin, name, nd):
"""
Like C lookup, but the first index is optimized instead.
@@ -553,7 +553,7 @@ def buf_lookup_fortran_code(proto, defin, name, nd):
def use_py2_buffer_functions(env):
env.use_utility_code(GetAndReleaseBufferUtilityCode())
-
+
class GetAndReleaseBufferUtilityCode(object):
# Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
# For >= 2.6 we do double mode -- use the new buffer interface on objects
@@ -617,7 +617,7 @@ class GetAndReleaseBufferUtilityCode(object):
def mangle_dtype_name(dtype):
- # Use prefixes to separate user defined types from builtins
+ # Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
@@ -636,7 +636,7 @@ def get_type_information_cname(code, dtype, maxdepth=None):
and return the name of the type info struct.
Structs with two floats of the same size are encoded as complex numbers.
- One can separate between complex numbers declared as struct or with native
+ One can separate between complex numbers declared as struct or with native
encoding by inspecting to see if the fields field of the type is
filled in.
"""
@@ -723,9 +723,9 @@ def load_buffer_utility(util_code_name, context=None, **kwargs):
else:
return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
-context = dict(max_dims=Options.buffer_max_dims)
-buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
-buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
+context = dict(max_dims=Options.buffer_max_dims)
+buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
+buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
# Utility function to set the right exception
# The caller should immediately goto_error
@@ -733,8 +733,8 @@ raise_indexerror_code = load_buffer_utility("BufferIndexError")
raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
-acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
-buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
-
+acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
+buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
+
# See utility code BufferFormatFromTypeInfo
-_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
+_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
diff --git a/contrib/tools/cython/Cython/Compiler/Builtin.py b/contrib/tools/cython/Cython/Compiler/Builtin.py
index 5fa717507d..a337246dda 100644
--- a/contrib/tools/cython/Cython/Compiler/Builtin.py
+++ b/contrib/tools/cython/Cython/Compiler/Builtin.py
@@ -21,7 +21,7 @@ pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
builtin_utility_code = {
- 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
+ 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
}
@@ -95,35 +95,35 @@ builtin_function_table = [
is_strict_signature = True),
BuiltinFunction('abs', "f", "f", "fabsf",
is_strict_signature = True),
- BuiltinFunction('abs', "i", "i", "abs",
- is_strict_signature = True),
- BuiltinFunction('abs', "l", "l", "labs",
- is_strict_signature = True),
- BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
- utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
- func_type = PyrexTypes.CFuncType(
- PyrexTypes.c_longlong_type, [
- PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
- ],
- is_strict_signature = True, nogil=True)),
- ] + list(
- BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
+ BuiltinFunction('abs', "i", "i", "abs",
+ is_strict_signature = True),
+ BuiltinFunction('abs', "l", "l", "labs",
+ is_strict_signature = True),
+ BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
+ utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_longlong_type, [
+ PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
+ ],
+ is_strict_signature = True, nogil=True)),
+ ] + list(
+ BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
func_type = PyrexTypes.CFuncType(
- t,
- [PyrexTypes.CFuncTypeArg("arg", t, None)],
- is_strict_signature = True, nogil=True))
- for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
- ) + list(
- BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
+ t,
+ [PyrexTypes.CFuncTypeArg("arg", t, None)],
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
+ ) + list(
+ BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
func_type = PyrexTypes.CFuncType(
- t.real_type, [
- PyrexTypes.CFuncTypeArg("arg", t, None)
+ t.real_type, [
+ PyrexTypes.CFuncTypeArg("arg", t, None)
],
- is_strict_signature = True, nogil=True))
- for t in (PyrexTypes.c_float_complex_type,
- PyrexTypes.c_double_complex_type,
- PyrexTypes.c_longdouble_complex_type)
- ) + [
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_float_complex_type,
+ PyrexTypes.c_double_complex_type,
+ PyrexTypes.c_longdouble_complex_type)
+ ) + [
BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
utility_code=UtilityCode.load("py_abs", "Builtins.c")),
#('all', "", "", ""),
@@ -153,8 +153,8 @@ builtin_function_table = [
utility_code=getattr3_utility_code),
BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
utility_code=getattr_utility_code),
- BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
- utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
+ BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
+ utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
#('hex', "", "", ""),
#('id', "", "", ""),
@@ -329,18 +329,18 @@ builtin_types_table = [
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear"),
# discard() and remove() have a special treatment for unhashable values
- BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
- utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
- BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
- utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
- # update is actually variadic (see Github issue #1645)
-# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
-# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
+ BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
+ utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
+ BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
+ utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
+ # update is actually variadic (see Github issue #1645)
+# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
+# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
BuiltinMethod("add", "TO", "r", "PySet_Add"),
BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
("frozenset", "PyFrozenSet_Type", []),
("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
- ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
+ ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
]
@@ -392,14 +392,14 @@ def init_builtin_types():
utility = builtin_utility_code.get(name)
if name == 'frozenset':
objstruct_cname = 'PySetObject'
- elif name == 'bytearray':
- objstruct_cname = 'PyByteArrayObject'
+ elif name == 'bytearray':
+ objstruct_cname = 'PyByteArrayObject'
elif name == 'bool':
objstruct_cname = None
elif name == 'Exception':
objstruct_cname = "PyBaseExceptionObject"
- elif name == 'StopAsyncIteration':
- objstruct_cname = "PyBaseExceptionObject"
+ elif name == 'StopAsyncIteration':
+ objstruct_cname = "PyBaseExceptionObject"
else:
objstruct_cname = 'Py%sObject' % name.capitalize()
the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
diff --git a/contrib/tools/cython/Cython/Compiler/CmdLine.py b/contrib/tools/cython/Cython/Compiler/CmdLine.py
index a20ab38dc2..9c4da0c92d 100644
--- a/contrib/tools/cython/Cython/Compiler/CmdLine.py
+++ b/contrib/tools/cython/Cython/Compiler/CmdLine.py
@@ -153,8 +153,8 @@ def parse_command_line(args):
options.module_name = pop_arg()
elif option == '--init-suffix':
options.init_suffix = pop_arg()
- elif option == '--source-root':
- Options.source_root = pop_arg()
+ elif option == '--source-root':
+ Options.source_root = pop_arg()
elif option == '-2':
options.language_level = 2
elif option == '-3':
@@ -165,8 +165,8 @@ def parse_command_line(args):
options.capi_reexport_cincludes = True
elif option == "--fast-fail":
Options.fast_fail = True
- elif option == "--cimport-from-pyx":
- Options.cimport_from_pyx = True
+ elif option == "--cimport-from-pyx":
+ Options.cimport_from_pyx = True
elif option in ('-Werror', '--warning-errors'):
Options.warning_errors = True
elif option in ('-Wextra', '--warning-extra'):
diff --git a/contrib/tools/cython/Cython/Compiler/Code.pxd b/contrib/tools/cython/Cython/Compiler/Code.pxd
index acad0c1cf4..4fb342dba9 100644
--- a/contrib/tools/cython/Cython/Compiler/Code.pxd
+++ b/contrib/tools/cython/Cython/Compiler/Code.pxd
@@ -5,25 +5,25 @@ cimport cython
from ..StringIOTree cimport StringIOTree
-cdef class UtilityCodeBase(object):
- cpdef format_code(self, code_string, replace_empty_lines=*)
-
-
-cdef class UtilityCode(UtilityCodeBase):
- cdef public object name
- cdef public object proto
- cdef public object impl
- cdef public object init
- cdef public object cleanup
- cdef public object proto_block
- cdef public object requires
- cdef public dict _cache
- cdef public list specialize_list
- cdef public object file
-
- cpdef none_or_sub(self, s, context)
-
-
+cdef class UtilityCodeBase(object):
+ cpdef format_code(self, code_string, replace_empty_lines=*)
+
+
+cdef class UtilityCode(UtilityCodeBase):
+ cdef public object name
+ cdef public object proto
+ cdef public object impl
+ cdef public object init
+ cdef public object cleanup
+ cdef public object proto_block
+ cdef public object requires
+ cdef public dict _cache
+ cdef public list specialize_list
+ cdef public object file
+
+ cpdef none_or_sub(self, s, context)
+
+
cdef class FunctionState:
cdef public set names_taken
cdef public object owner
@@ -40,7 +40,7 @@ cdef class FunctionState:
cdef public object return_from_error_cleanup_label # not used in __init__ ?
cdef public object exc_vars
- cdef public object current_except
+ cdef public object current_except
cdef public bint in_try_finally
cdef public bint can_trace
cdef public bint gil_owned
diff --git a/contrib/tools/cython/Cython/Compiler/Code.py b/contrib/tools/cython/Cython/Compiler/Code.py
index f43c4b2b8e..2d09bfae53 100644
--- a/contrib/tools/cython/Cython/Compiler/Code.py
+++ b/contrib/tools/cython/Cython/Compiler/Code.py
@@ -7,15 +7,15 @@
from __future__ import absolute_import
import cython
-cython.declare(os=object, re=object, operator=object, textwrap=object,
- Template=object, Naming=object, Options=object, StringEncoding=object,
+cython.declare(os=object, re=object, operator=object, textwrap=object,
+ Template=object, Naming=object, Options=object, StringEncoding=object,
Utils=object, SourceDescriptor=object, StringIOTree=object,
- DebugFlags=object, basestring=object, defaultdict=object,
- closing=object, partial=object)
+ DebugFlags=object, basestring=object, defaultdict=object,
+ closing=object, partial=object)
import os
import re
-import shutil
+import shutil
import sys
import operator
import textwrap
@@ -71,42 +71,42 @@ basicsize_builtins_map = {
}
uncachable_builtins = [
- # Global/builtin names that cannot be cached because they may or may not
- # be available at import time, for various reasons:
- ## - Py3.7+
- 'breakpoint', # might deserve an implementation in Cython
- ## - Py3.4+
- '__loader__',
- '__spec__',
- ## - Py3+
- 'BlockingIOError',
- 'BrokenPipeError',
- 'ChildProcessError',
- 'ConnectionAbortedError',
- 'ConnectionError',
- 'ConnectionRefusedError',
- 'ConnectionResetError',
- 'FileExistsError',
- 'FileNotFoundError',
- 'InterruptedError',
- 'IsADirectoryError',
- 'ModuleNotFoundError',
- 'NotADirectoryError',
- 'PermissionError',
- 'ProcessLookupError',
- 'RecursionError',
- 'ResourceWarning',
- #'StopAsyncIteration', # backported
- 'TimeoutError',
- '__build_class__',
- 'ascii', # might deserve an implementation in Cython
- #'exec', # implemented in Cython
- ## - Py2.7+
- 'memoryview',
- ## - platform specific
+ # Global/builtin names that cannot be cached because they may or may not
+ # be available at import time, for various reasons:
+ ## - Py3.7+
+ 'breakpoint', # might deserve an implementation in Cython
+ ## - Py3.4+
+ '__loader__',
+ '__spec__',
+ ## - Py3+
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'ModuleNotFoundError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ResourceWarning',
+ #'StopAsyncIteration', # backported
+ 'TimeoutError',
+ '__build_class__',
+ 'ascii', # might deserve an implementation in Cython
+ #'exec', # implemented in Cython
+ ## - Py2.7+
+ 'memoryview',
+ ## - platform specific
'WindowsError',
- ## - others
- '_', # e.g. used by gettext
+ ## - others
+ '_', # e.g. used by gettext
]
special_py_methods = set([
@@ -121,82 +121,82 @@ modifier_output_mapper = {
}.get
-class IncludeCode(object):
- """
- An include file and/or verbatim C code to be included in the
- generated sources.
- """
- # attributes:
- #
- # pieces {order: unicode}: pieces of C code to be generated.
- # For the included file, the key "order" is zero.
- # For verbatim include code, the "order" is the "order"
- # attribute of the original IncludeCode where this piece
- # of C code was first added. This is needed to prevent
- # duplication if the same include code is found through
- # multiple cimports.
- # location int: where to put this include in the C sources, one
- # of the constants INITIAL, EARLY, LATE
- # order int: sorting order (automatically set by increasing counter)
-
- # Constants for location. If the same include occurs with different
- # locations, the earliest one takes precedense.
- INITIAL = 0
- EARLY = 1
- LATE = 2
-
- counter = 1 # Counter for "order"
-
- def __init__(self, include=None, verbatim=None, late=True, initial=False):
- self.order = self.counter
- type(self).counter += 1
- self.pieces = {}
-
- if include:
- if include[0] == '<' and include[-1] == '>':
- self.pieces[0] = u'#include {0}'.format(include)
- late = False # system include is never late
- else:
- self.pieces[0] = u'#include "{0}"'.format(include)
-
- if verbatim:
- self.pieces[self.order] = verbatim
-
- if initial:
- self.location = self.INITIAL
- elif late:
- self.location = self.LATE
- else:
- self.location = self.EARLY
-
- def dict_update(self, d, key):
- """
- Insert `self` in dict `d` with key `key`. If that key already
- exists, update the attributes of the existing value with `self`.
- """
- if key in d:
- other = d[key]
- other.location = min(self.location, other.location)
- other.pieces.update(self.pieces)
- else:
- d[key] = self
-
- def sortkey(self):
- return self.order
-
- def mainpiece(self):
- """
- Return the main piece of C code, corresponding to the include
- file. If there was no include file, return None.
- """
- return self.pieces.get(0)
-
- def write(self, code):
- # Write values of self.pieces dict, sorted by the keys
- for k in sorted(self.pieces):
- code.putln(self.pieces[k])
-
-
+class IncludeCode(object):
+ """
+ An include file and/or verbatim C code to be included in the
+ generated sources.
+ """
+ # attributes:
+ #
+ # pieces {order: unicode}: pieces of C code to be generated.
+ # For the included file, the key "order" is zero.
+ # For verbatim include code, the "order" is the "order"
+ # attribute of the original IncludeCode where this piece
+ # of C code was first added. This is needed to prevent
+ # duplication if the same include code is found through
+ # multiple cimports.
+ # location int: where to put this include in the C sources, one
+ # of the constants INITIAL, EARLY, LATE
+ # order int: sorting order (automatically set by increasing counter)
+
+ # Constants for location. If the same include occurs with different
+ # locations, the earliest one takes precedense.
+ INITIAL = 0
+ EARLY = 1
+ LATE = 2
+
+ counter = 1 # Counter for "order"
+
+ def __init__(self, include=None, verbatim=None, late=True, initial=False):
+ self.order = self.counter
+ type(self).counter += 1
+ self.pieces = {}
+
+ if include:
+ if include[0] == '<' and include[-1] == '>':
+ self.pieces[0] = u'#include {0}'.format(include)
+ late = False # system include is never late
+ else:
+ self.pieces[0] = u'#include "{0}"'.format(include)
+
+ if verbatim:
+ self.pieces[self.order] = verbatim
+
+ if initial:
+ self.location = self.INITIAL
+ elif late:
+ self.location = self.LATE
+ else:
+ self.location = self.EARLY
+
+ def dict_update(self, d, key):
+ """
+ Insert `self` in dict `d` with key `key`. If that key already
+ exists, update the attributes of the existing value with `self`.
+ """
+ if key in d:
+ other = d[key]
+ other.location = min(self.location, other.location)
+ other.pieces.update(self.pieces)
+ else:
+ d[key] = self
+
+ def sortkey(self):
+ return self.order
+
+ def mainpiece(self):
+ """
+ Return the main piece of C code, corresponding to the include
+ file. If there was no include file, return None.
+ """
+ return self.pieces.get(0)
+
+ def write(self, code):
+ # Write values of self.pieces dict, sorted by the keys
+ for k in sorted(self.pieces):
+ code.putln(self.pieces[k])
+
+
def get_utility_dir():
# make this a function and not global variables:
# http://trac.cython.org/cython_trac/ticket/475
@@ -256,15 +256,15 @@ class UtilityCodeBase(object):
if type == 'proto':
utility[0] = code
elif type == 'impl':
- utility[1] = code
+ utility[1] = code
else:
- all_tags = utility[2]
+ all_tags = utility[2]
if KEYWORDS_MUST_BE_BYTES:
type = type.encode('ASCII')
all_tags[type] = code
if tags:
- all_tags = utility[2]
+ all_tags = utility[2]
for name, values in tags.items():
if KEYWORDS_MUST_BE_BYTES:
name = name.encode('ASCII')
@@ -295,7 +295,7 @@ class UtilityCodeBase(object):
with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
all_lines = f.readlines()
- utilities = defaultdict(lambda: [None, None, {}])
+ utilities = defaultdict(lambda: [None, None, {}])
lines = []
tags = defaultdict(set)
utility = type = None
@@ -369,7 +369,7 @@ class UtilityCodeBase(object):
from_file = files[0]
utilities = cls.load_utilities_from_file(from_file)
- proto, impl, tags = utilities[util_code_name]
+ proto, impl, tags = utilities[util_code_name]
if tags:
orig_kwargs = kwargs.copy()
@@ -388,7 +388,7 @@ class UtilityCodeBase(object):
elif not values:
values = None
elif len(values) == 1:
- values = list(values)[0]
+ values = list(values)[0]
kwargs[name] = values
if proto is not None:
@@ -453,7 +453,7 @@ class UtilityCode(UtilityCodeBase):
hashes/equals by instance
proto C prototypes
- impl implementation code
+ impl implementation code
init code to call on module initialization
requires utility code dependencies
proto_block the place in the resulting file where the prototype should
@@ -531,22 +531,22 @@ class UtilityCode(UtilityCodeBase):
def inject_string_constants(self, impl, output):
"""Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
"""
- if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
+ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
return False, impl
replacements = {}
def externalise(matchobj):
- key = matchobj.groups()
+ key = matchobj.groups()
try:
- cname = replacements[key]
+ cname = replacements[key]
except KeyError:
- str_type, name = key
- cname = replacements[key] = output.get_py_string_const(
- StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
+ str_type, name = key
+ cname = replacements[key] = output.get_py_string_const(
+ StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
return cname
- impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
- assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
+ impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
+ assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
return True, impl
def inject_unbound_methods(self, impl, output):
@@ -556,18 +556,18 @@ class UtilityCode(UtilityCodeBase):
return False, impl
def externalise(matchobj):
- type_cname, method_name, obj_cname, args = matchobj.groups()
- args = [arg.strip() for arg in args[1:].split(',')] if args else []
- assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
- return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
-
- impl = re.sub(
- r'CALL_UNBOUND_METHOD\('
- r'([a-zA-Z_]+),' # type cname
- r'\s*"([^"]+)",' # method name
- r'\s*([^),]+)' # object cname
- r'((?:,\s*[^),]+)*)' # args*
- r'\)', externalise, impl)
+ type_cname, method_name, obj_cname, args = matchobj.groups()
+ args = [arg.strip() for arg in args[1:].split(',')] if args else []
+ assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
+ return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
+
+ impl = re.sub(
+ r'CALL_UNBOUND_METHOD\('
+ r'([a-zA-Z_]+),' # type cname
+ r'\s*"([^"]+)",' # method name
+ r'\s*([^),]+)' # object cname
+ r'((?:,\s*[^),]+)*)' # args*
+ r'\)', externalise, impl)
assert 'CALL_UNBOUND_METHOD(' not in impl
return True, impl
@@ -679,7 +679,7 @@ class LazyUtilityCode(UtilityCodeBase):
available. Useful when you only have 'env' but not 'code'.
"""
__name__ = '<lazy>'
- requires = None
+ requires = None
def __init__(self, callback):
self.callback = callback
@@ -718,7 +718,7 @@ class FunctionState(object):
self.in_try_finally = 0
self.exc_vars = None
- self.current_except = None
+ self.current_except = None
self.can_trace = False
self.gil_owned = True
@@ -764,8 +764,8 @@ class FunctionState(object):
label += '_' + name
return label
- def new_yield_label(self, expr_type='yield'):
- label = self.new_label('resume_from_%s' % expr_type)
+ def new_yield_label(self, expr_type='yield'):
+ label = self.new_label('resume_from_%s' % expr_type)
num_and_label = (len(self.yield_labels) + 1, label)
self.yield_labels.append(num_and_label)
return num_and_label
@@ -1131,7 +1131,7 @@ class GlobalState(object):
'global_var',
'string_decls',
'decls',
- 'late_includes',
+ 'late_includes',
'all_the_rest',
'pystring_table',
'cached_builtins',
@@ -1399,8 +1399,8 @@ class GlobalState(object):
prefix = Naming.const_prefix
return "%s%s" % (prefix, name_suffix)
- def get_cached_unbound_method(self, type_cname, method_name):
- key = (type_cname, method_name)
+ def get_cached_unbound_method(self, type_cname, method_name):
+ key = (type_cname, method_name)
try:
cname = self.cached_cmethods[key]
except KeyError:
@@ -1408,18 +1408,18 @@ class GlobalState(object):
'umethod', '%s_%s' % (type_cname, method_name))
return cname
- def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
- # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
- utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
- self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
- cache_cname = self.get_cached_unbound_method(type_cname, method_name)
- args = [obj_cname] + arg_cnames
- return "__Pyx_%s(&%s, %s)" % (
- utility_code_name,
- cache_cname,
- ', '.join(args),
- )
-
+ def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
+ # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
+ self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
+ cache_cname = self.get_cached_unbound_method(type_cname, method_name)
+ args = [obj_cname] + arg_cnames
+ return "__Pyx_%s(&%s, %s)" % (
+ utility_code_name,
+ cache_cname,
+ ', '.join(args),
+ )
+
def add_cached_builtin_decl(self, entry):
if entry.is_builtin and entry.is_const:
if self.should_declare(entry.cname, entry):
@@ -1472,7 +1472,7 @@ class GlobalState(object):
decl = self.parts['decls']
init = self.parts['init_globals']
cnames = []
- for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
+ for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
cnames.append(cname)
method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % (
@@ -1606,13 +1606,13 @@ class GlobalState(object):
#
def lookup_filename(self, source_desc):
- entry = source_desc.get_filenametable_entry()
+ entry = source_desc.get_filenametable_entry()
try:
- index = self.filename_table[entry]
+ index = self.filename_table[entry]
except KeyError:
index = len(self.filename_list)
self.filename_list.append(source_desc)
- self.filename_table[entry] = index
+ self.filename_table[entry] = index
return index
def commented_file_contents(self, source_desc):
@@ -1693,7 +1693,7 @@ class CCodeWriter(object):
as well
- labels, temps, exc_vars: One must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
- sanity checking and forward compatibility). Created insertion points
+ sanity checking and forward compatibility). Created insertion points
 loses this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
@@ -1807,7 +1807,7 @@ class CCodeWriter(object):
# Functions delegated to function scope
def new_label(self, name=None): return self.funcstate.new_label(name)
def new_error_label(self): return self.funcstate.new_error_label()
- def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
+ def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
def get_loop_labels(self): return self.funcstate.get_loop_labels()
def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
def new_loop_labels(self): return self.funcstate.new_loop_labels()
@@ -1918,7 +1918,7 @@ class CCodeWriter(object):
tmp_path = '%s.tmp%s' % (path, os.getpid())
with closing(Utils.open_new_file(tmp_path)) as f:
f.write(code)
- shutil.move(tmp_path, path)
+ shutil.move(tmp_path, path)
code = '#include "%s"\n' % path
self.put(code)
@@ -2093,12 +2093,12 @@ class CCodeWriter(object):
if entry.type.is_pyobject:
self.putln("__Pyx_XGIVEREF(%s);" % self.entry_as_pyobject(entry))
- def put_var_incref(self, entry, nanny=True):
+ def put_var_incref(self, entry, nanny=True):
if entry.type.is_pyobject:
- if nanny:
- self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
- else:
- self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry))
+ if nanny:
+ self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
+ else:
+ self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry))
def put_var_xincref(self, entry):
if entry.type.is_pyobject:
@@ -2122,8 +2122,8 @@ class CCodeWriter(object):
self.put_xdecref_memoryviewslice(cname, have_gil=have_gil)
return
- prefix = '__Pyx' if nanny else 'Py'
- X = 'X' if null_check else ''
+ prefix = '__Pyx' if nanny else 'Py'
+ X = 'X' if null_check else ''
if clear:
if clear_before_decref:
@@ -2147,12 +2147,12 @@ class CCodeWriter(object):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
- def put_var_xdecref(self, entry, nanny=True):
+ def put_var_xdecref(self, entry, nanny=True):
if entry.type.is_pyobject:
- if nanny:
- self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
- else:
- self.putln("Py_XDECREF(%s);" % self.entry_as_pyobject(entry))
+ if nanny:
+ self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
+ else:
+ self.putln("Py_XDECREF(%s);" % self.entry_as_pyobject(entry))
def put_var_decref_clear(self, entry):
self._put_var_decref_clear(entry, null_check=False)
@@ -2273,30 +2273,30 @@ class CCodeWriter(object):
"""
self.globalstate.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
- if self.globalstate.directives['fast_gil']:
- self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
- else:
- self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
if not variable:
variable = '__pyx_gilstate_save'
if declare_gilstate:
self.put("PyGILState_STATE ")
- self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
+ self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
self.putln("#endif")
def put_release_ensured_gil(self, variable=None):
"""
Releases the GIL, corresponds to `put_ensure_gil`.
"""
- if self.globalstate.directives['fast_gil']:
- self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
- else:
- self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
if not variable:
variable = '__pyx_gilstate_save'
self.putln("#ifdef WITH_THREAD")
- self.putln("__Pyx_PyGILState_Release(%s);" % variable)
+ self.putln("__Pyx_PyGILState_Release(%s);" % variable)
self.putln("#endif")
def put_acquire_gil(self, variable=None):
@@ -2304,12 +2304,12 @@ class CCodeWriter(object):
Acquire the GIL. The thread's thread state must have been initialized
by a previous `put_release_gil`
"""
- if self.globalstate.directives['fast_gil']:
- self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
- else:
- self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
- self.putln("__Pyx_FastGIL_Forget();")
+ self.putln("__Pyx_FastGIL_Forget();")
if variable:
self.putln('_save = %s;' % variable)
self.putln("Py_BLOCK_THREADS")
@@ -2317,16 +2317,16 @@ class CCodeWriter(object):
def put_release_gil(self, variable=None):
"Release the GIL, corresponds to `put_acquire_gil`."
- if self.globalstate.directives['fast_gil']:
- self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
- else:
- self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
self.putln("PyThreadState *_save;")
self.putln("Py_UNBLOCK_THREADS")
if variable:
self.putln('%s = _save;' % variable)
- self.putln("__Pyx_FastGIL_Remember();")
+ self.putln("__Pyx_FastGIL_Remember();")
self.putln("#endif")
def declare_gilstate(self):
@@ -2410,7 +2410,7 @@ class CCodeWriter(object):
def put_finish_refcount_context(self):
self.putln("__Pyx_RefNannyFinishContext();")
- def put_add_traceback(self, qualified_name, include_cline=True):
+ def put_add_traceback(self, qualified_name, include_cline=True):
"""
Build a Python traceback for propagating exceptions.
@@ -2418,7 +2418,7 @@ class CCodeWriter(object):
"""
format_tuple = (
qualified_name,
- Naming.clineno_cname if include_cline else 0,
+ Naming.clineno_cname if include_cline else 0,
Naming.lineno_cname,
Naming.filename_cname,
)
@@ -2486,7 +2486,7 @@ class CCodeWriter(object):
self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
self.putln("#endif")
-
+
class PyrexCodeWriter(object):
# f file output file
# level int indentation level
diff --git a/contrib/tools/cython/Cython/Compiler/CodeGeneration.py b/contrib/tools/cython/Cython/Compiler/CodeGeneration.py
index e64049c7f5..15d445cb07 100644
--- a/contrib/tools/cython/Cython/Compiler/CodeGeneration.py
+++ b/contrib/tools/cython/Cython/Compiler/CodeGeneration.py
@@ -12,7 +12,7 @@ class ExtractPxdCode(VisitorTransform):
The result is a tuple (StatListNode, ModuleScope), i.e.
everything that is needed from the pxd after it is processed.
- A purer approach would be to separately compile the pxd code,
+ A purer approach would be to separately compile the pxd code,
but the result would have to be slightly more sophisticated
than pure strings (functions + wanted interned strings +
wanted utility code + wanted cached objects) so for now this
diff --git a/contrib/tools/cython/Cython/Compiler/CythonScope.py b/contrib/tools/cython/Cython/Compiler/CythonScope.py
index 1c25d1a6b4..09f2bb3cfe 100644
--- a/contrib/tools/cython/Cython/Compiler/CythonScope.py
+++ b/contrib/tools/cython/Cython/Compiler/CythonScope.py
@@ -26,10 +26,10 @@ class CythonScope(ModuleScope):
cname='<error>')
entry.in_cinclude = True
- def is_cpp(self):
- # Allow C++ utility code in C++ contexts.
- return self.context.cpp
-
+ def is_cpp(self):
+ # Allow C++ utility code in C++ contexts.
+ return self.context.cpp
+
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
type = parse_basic_type(name)
diff --git a/contrib/tools/cython/Cython/Compiler/Errors.py b/contrib/tools/cython/Cython/Compiler/Errors.py
index 9761b52c32..66fe05487c 100644
--- a/contrib/tools/cython/Cython/Compiler/Errors.py
+++ b/contrib/tools/cython/Cython/Compiler/Errors.py
@@ -10,7 +10,7 @@ except ImportError:
any_string_type = (bytes, str)
import sys
-from contextlib import contextmanager
+from contextlib import contextmanager
from ..Utils import open_new_file
from . import DebugFlags
@@ -146,8 +146,8 @@ def close_listing_file():
listing_file.close()
listing_file = None
-def report_error(err, use_stack=True):
- if error_stack and use_stack:
+def report_error(err, use_stack=True):
+ if error_stack and use_stack:
error_stack[-1].append(err)
else:
global num_errors
@@ -229,34 +229,34 @@ def warn_once(position, message, level=0):
error_stack = []
-
+
def hold_errors():
error_stack.append([])
-
+
def release_errors(ignore=False):
held_errors = error_stack.pop()
if not ignore:
for err in held_errors:
report_error(err)
-
+
def held_errors():
return error_stack[-1]
-# same as context manager:
-
-@contextmanager
-def local_errors(ignore=False):
- errors = []
- error_stack.append(errors)
- try:
- yield errors
- finally:
- release_errors(ignore=ignore)
-
-
+# same as context manager:
+
+@contextmanager
+def local_errors(ignore=False):
+ errors = []
+ error_stack.append(errors)
+ try:
+ yield errors
+ finally:
+ release_errors(ignore=ignore)
+
+
# this module needs a redesign to support parallel cythonisation, but
# for now, the following works at least in sequential compiler runs
diff --git a/contrib/tools/cython/Cython/Compiler/ExprNodes.py b/contrib/tools/cython/Cython/Compiler/ExprNodes.py
index 4a402f8126..4feeb2a037 100644
--- a/contrib/tools/cython/Cython/Compiler/ExprNodes.py
+++ b/contrib/tools/cython/Cython/Compiler/ExprNodes.py
@@ -7,7 +7,7 @@ from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
- StringEncoding=object, operator=object, local_errors=object, report_error=object,
+ StringEncoding=object, operator=object, local_errors=object, report_error=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
@@ -16,19 +16,19 @@ cython.declare(error=object, warning=object, warn_once=object, InternalError=obj
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
-import re
+import re
import sys
import copy
import os.path
import operator
-from .Errors import (
- error, warning, InternalError, CompileError, report_error, local_errors)
+from .Errors import (
+ error, warning, InternalError, CompileError, report_error, local_errors)
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
-from .Nodes import Node, utility_code_for_imports, analyse_type_annotation
+from .Nodes import Node, utility_code_for_imports, analyse_type_annotation
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
@@ -47,7 +47,7 @@ from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supporte
is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran,
pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type,
pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor)
-from .PyrexTypes import PythranExpr
+from .PyrexTypes import PythranExpr
try:
from __builtin__ import basestring
@@ -306,18 +306,18 @@ class ExprNode(Node):
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
- # is_numpy_attribute boolean Is a Numpy module attribute
+ # is_numpy_attribute boolean Is a Numpy module attribute
# result_code/temp_result can safely be set to None
- # annotation ExprNode or None PEP526 annotation for names or expressions
+ # annotation ExprNode or None PEP526 annotation for names or expressions
result_ctype = None
type = None
- annotation = None
+ annotation = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
- is_numpy_attribute = False
+ is_numpy_attribute = False
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
@@ -498,13 +498,13 @@ class ExprNode(Node):
else:
return self.calculate_result_code()
- def pythran_result(self, type_=None):
- if is_pythran_supported_node_or_none(self):
- return to_pythran(self)
-
- assert(type_ is not None)
- return to_pythran(self, type_)
-
+ def pythran_result(self, type_=None):
+ if is_pythran_supported_node_or_none(self):
+ return to_pythran(self)
+
+ assert(type_ is not None)
+ return to_pythran(self, type_)
+
def is_c_result_required(self):
"""
Subtypes may return False here if result temp allocation can be skipped.
@@ -927,19 +927,19 @@ class ExprNode(Node):
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
- else:
- if src.type.writable_needed:
- dst_type.writable_needed = True
- if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
- copying=self.is_memview_copy_assignment):
- if src.type.dtype.same_as(dst_type.dtype):
- msg = "Memoryview '%s' not conformable to memoryview '%s'."
- tup = src.type, dst_type
- else:
- msg = "Different base types for memoryviews (%s, %s)"
- tup = src.type.dtype, dst_type.dtype
-
- error(self.pos, msg % tup)
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
+
+ error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
@@ -950,16 +950,16 @@ class ExprNode(Node):
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
- elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
- # We let the compiler decide whether this is valid
- return src
- elif is_pythran_expr(src.type):
- if is_pythran_supported_type(dst_type):
- # Match the case were a pythran expr is assigned to a value, or vice versa.
- # We let the C++ compiler decide whether this is valid or not!
- return src
- # Else, we need to convert the Pythran expression to a Python object
- src = CoerceToPyTypeNode(src, env, type=dst_type)
+ elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
+ # We let the compiler decide whether this is valid
+ return src
+ elif is_pythran_expr(src.type):
+ if is_pythran_supported_type(dst_type):
+ # Match the case were a pythran expr is assigned to a value, or vice versa.
+ # We let the C++ compiler decide whether this is valid or not!
+ return src
+ # Else, we need to convert the Pythran expression to a Python object
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
elif src.type.is_pyobject:
if used_as_reference and dst_type.is_cpp_class:
warning(
@@ -1141,13 +1141,13 @@ class NoneNode(PyConstNode):
def may_be_none(self):
return True
- def coerce_to(self, dst_type, env):
- if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
- # Catch this error early and loudly.
- error(self.pos, "Cannot assign None to %s" % dst_type)
- return super(NoneNode, self).coerce_to(dst_type, env)
-
+ def coerce_to(self, dst_type, env):
+ if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
+ # Catch this error early and loudly.
+ error(self.pos, "Cannot assign None to %s" % dst_type)
+ return super(NoneNode, self).coerce_to(dst_type, env)
+
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
@@ -1432,28 +1432,28 @@ def _analyse_name_as_type(name, pos, env):
type = PyrexTypes.parse_basic_type(name)
if type is not None:
return type
-
+
global_entry = env.global_scope().lookup(name)
if global_entry and global_entry.type and (
global_entry.type.is_extension_type
or global_entry.type.is_struct_or_union
or global_entry.type.is_builtin_type
or global_entry.type.is_cpp_class):
- return global_entry.type
-
+ return global_entry.type
+
from .TreeFragment import TreeFragment
- with local_errors(ignore=True):
- pos = (pos[0], pos[1], pos[2]-7)
- try:
- declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
- except CompileError:
- pass
- else:
- sizeof_node = declaration.root.stats[0].expr
- if isinstance(sizeof_node, SizeofTypeNode):
- sizeof_node = sizeof_node.analyse_types(env)
- if isinstance(sizeof_node, SizeofTypeNode):
- return sizeof_node.arg_type
+ with local_errors(ignore=True):
+ pos = (pos[0], pos[1], pos[2]-7)
+ try:
+ declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
+ except CompileError:
+ pass
+ else:
+ sizeof_node = declaration.root.stats[0].expr
+ if isinstance(sizeof_node, SizeofTypeNode):
+ sizeof_node = sizeof_node.analyse_types(env)
+ if isinstance(sizeof_node, SizeofTypeNode):
+ return sizeof_node.arg_type
return None
@@ -1507,7 +1507,7 @@ class BytesNode(ConstNode):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
- return node
+ return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
@@ -1516,10 +1516,10 @@ class BytesNode(ConstNode):
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
- # Exclude the case of passing a C string literal into a non-const C++ string.
- if not dst_type.is_cpp_class or dst_type.is_const:
- node.type = dst_type
- return node
+ # Exclude the case of passing a C string literal into a non-const C++ string.
+ if not dst_type.is_cpp_class or dst_type.is_const:
+ node.type = dst_type
+ return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
@@ -1740,15 +1740,15 @@ class IdentifierStringNode(StringNode):
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
- # value string imaginary part (float value)
+ # value string imaginary part (float value)
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
- self.constant_result = complex(0.0, float(self.value))
+ self.constant_result = complex(0.0, float(self.value))
def compile_time_value(self, denv):
- return complex(0.0, float(self.value))
+ return complex(0.0, float(self.value))
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
@@ -1763,7 +1763,7 @@ class ImagNode(AtomicExprNode):
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
- node.type = Builtin.complex_type
+ node.type = Builtin.complex_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
@@ -1802,7 +1802,7 @@ class NewExprNode(AtomicExprNode):
self.type = error_type
return
self.cpp_check(env)
- constructor = type.get_constructor(self.pos)
+ constructor = type.get_constructor(self.pos)
self.class_type = type
self.entry = constructor
self.type = constructor.type
@@ -1916,34 +1916,34 @@ class NameNode(AtomicExprNode):
return super(NameNode, self).coerce_to(dst_type, env)
- def declare_from_annotation(self, env, as_target=False):
- """Implements PEP 526 annotation typing in a fairly relaxed way.
-
- Annotations are ignored for global variables, Python class attributes and already declared variables.
- String literals are allowed and ignored.
- The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
- """
- if not env.directives['annotation_typing']:
- return
- if env.is_module_scope or env.is_py_class_scope:
- # annotations never create global cdef names and Python classes don't support them anyway
- return
- name = self.name
- if self.entry or env.lookup_here(name) is not None:
- # already declared => ignore annotation
- return
-
- annotation = self.annotation
- if annotation.is_string_literal:
- # name: "description" => not a type, but still a declared variable or attribute
- atype = None
- else:
- _, atype = analyse_type_annotation(annotation, env)
- if atype is None:
- atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
- self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
- self.entry.annotation = annotation
-
+ def declare_from_annotation(self, env, as_target=False):
+ """Implements PEP 526 annotation typing in a fairly relaxed way.
+
+ Annotations are ignored for global variables, Python class attributes and already declared variables.
+ String literals are allowed and ignored.
+ The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
+ """
+ if not env.directives['annotation_typing']:
+ return
+ if env.is_module_scope or env.is_py_class_scope:
+ # annotations never create global cdef names and Python classes don't support them anyway
+ return
+ name = self.name
+ if self.entry or env.lookup_here(name) is not None:
+ # already declared => ignore annotation
+ return
+
+ annotation = self.annotation
+ if annotation.is_string_literal:
+ # name: "description" => not a type, but still a declared variable or attribute
+ atype = None
+ else:
+ _, atype = analyse_type_annotation(annotation, env)
+ if atype is None:
+ atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
+ self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
+ self.entry.annotation = annotation
+
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
@@ -1983,9 +1983,9 @@ class NameNode(AtomicExprNode):
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
- if not self.entry and self.annotation is not None:
- # name : type = ...
- self.declare_from_annotation(env, as_target=True)
+ if not self.entry and self.annotation is not None:
+ # name : type = ...
+ self.declare_from_annotation(env, as_target=True)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
@@ -1996,27 +1996,27 @@ class NameNode(AtomicExprNode):
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
- if self.entry.as_module:
- # cimported modules namespace can shadow actual variables
- self.entry.is_variable = 1
+ if self.entry.as_module:
+ # cimported modules namespace can shadow actual variables
+ self.entry.is_variable = 1
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
entry = self.entry
- if entry is None:
- entry = env.lookup(self.name)
- if not entry:
- entry = env.declare_builtin(self.name, self.pos)
- if entry and entry.is_builtin and entry.is_const:
- self.is_literal = True
- if not entry:
- self.type = PyrexTypes.error_type
- return self
- self.entry = entry
- entry.used = 1
- if entry.type.is_buffer:
- from . import Buffer
- Buffer.used_buffer_aux_vars(entry)
+ if entry is None:
+ entry = env.lookup(self.name)
+ if not entry:
+ entry = env.declare_builtin(self.name, self.pos)
+ if entry and entry.is_builtin and entry.is_const:
+ self.is_literal = True
+ if not entry:
+ self.type = PyrexTypes.error_type
+ return self
+ self.entry = entry
+ entry.used = 1
+ if entry.type.is_buffer:
+ from . import Buffer
+ Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
@@ -2101,20 +2101,20 @@ class NameNode(AtomicExprNode):
py_entry.is_pyglobal = True
py_entry.scope = self.entry.scope
self.entry = py_entry
- elif not (entry.is_const or entry.is_variable or
- entry.is_builtin or entry.is_cfunction or
- entry.is_cpp_class):
- if self.entry.as_variable:
- self.entry = self.entry.as_variable
- elif not self.is_cython_module:
- error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
-
- def is_cimported_module_without_shadow(self, env):
- if self.is_cython_module or self.cython_attribute:
- return False
- entry = self.entry or env.lookup(self.name)
- return entry.as_module and not entry.is_variable
-
+ elif not (entry.is_const or entry.is_variable or
+ entry.is_builtin or entry.is_cfunction or
+ entry.is_cpp_class):
+ if self.entry.as_variable:
+ self.entry = self.entry.as_variable
+ elif not self.is_cython_module:
+ error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
+
+ def is_cimported_module_without_shadow(self, env):
+ if self.is_cython_module or self.cython_attribute:
+ return False
+ entry = self.entry or env.lookup(self.name)
+ return entry.as_module and not entry.is_variable
+
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
@@ -2153,11 +2153,11 @@ class NameNode(AtomicExprNode):
def check_const(self):
entry = self.entry
- if entry is not None and not (
- entry.is_const or
- entry.is_cfunction or
- entry.is_builtin or
- entry.type.is_const):
+ if entry is not None and not (
+ entry.is_const or
+ entry.is_cfunction or
+ entry.is_builtin or
+ entry.type.is_const):
self.not_const()
return False
return True
@@ -2301,8 +2301,8 @@ class NameNode(AtomicExprNode):
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
- code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
- setter = '__Pyx_SetNameInClass'
+ code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
+ setter = '__Pyx_SetNameInClass'
else:
assert False, repr(entry)
code.put_error_if_neg(
@@ -2379,11 +2379,11 @@ class NameNode(AtomicExprNode):
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
-
- if is_pythran_expr(self.type):
- code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
+
+ if is_pythran_expr(self.type):
+ code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
elif result != self.result():
- code.putln('%s = %s;' % (self.result(), result))
+ code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
@@ -2833,7 +2833,7 @@ class IteratorNode(ExprNode):
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
- code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
+ code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
@@ -2967,18 +2967,18 @@ class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
- # with_stat WithStatNode the surrounding 'with' statement
- # args TupleNode or ResultStatNode the exception info tuple
- # await_expr AwaitExprNode the await expression of an 'async with' statement
+ # with_stat WithStatNode the surrounding 'with' statement
+ # args TupleNode or ResultStatNode the exception info tuple
+ # await_expr AwaitExprNode the await expression of an 'async with' statement
- subexprs = ['args', 'await_expr']
+ subexprs = ['args', 'await_expr']
test_if_run = True
- await_expr = None
+ await_expr = None
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
- if self.await_expr:
- self.await_expr = self.await_expr.analyse_types(env)
+ if self.await_expr:
+ self.await_expr = self.await_expr.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
@@ -3005,12 +3005,12 @@ class WithExitCallNode(ExprNode):
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
- if self.await_expr:
+ if self.await_expr:
# FIXME: result_var temp currently leaks into the closure
- self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
- code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
- self.await_expr.generate_post_assignment_code(code)
- self.await_expr.free_temps(code)
+ self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
+ code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
+ self.await_expr.generate_post_assignment_code(code)
+ self.await_expr.free_temps(code)
if self.result_is_used:
self.allocate_temp_result(code)
@@ -3170,27 +3170,27 @@ class JoinedStrNode(ExprNode):
is_ascii = False
if isinstance(node, UnicodeNode):
try:
- # most strings will be ASCII or at least Latin-1
+ # most strings will be ASCII or at least Latin-1
node.value.encode('iso8859-1')
max_char_value = '255'
node.value.encode('us-ascii')
is_ascii = True
except UnicodeEncodeError:
- if max_char_value != '255':
- # not ISO8859-1 => check BMP limit
- max_char = max(map(ord, node.value))
- if max_char < 0xD800:
- # BMP-only, no surrogate pairs used
- max_char_value = '65535'
- ulength = str(len(node.value))
- elif max_char >= 65536:
- # cleary outside of BMP, and not on a 16-bit Unicode system
- max_char_value = '1114111'
- ulength = str(len(node.value))
- else:
- # not really worth implementing a check for surrogate pairs here
- # drawback: C code can differ when generating on Py2 with 2-byte Unicode
- pass
+ if max_char_value != '255':
+ # not ISO8859-1 => check BMP limit
+ max_char = max(map(ord, node.value))
+ if max_char < 0xD800:
+ # BMP-only, no surrogate pairs used
+ max_char_value = '65535'
+ ulength = str(len(node.value))
+ elif max_char >= 65536:
+ # cleary outside of BMP, and not on a 16-bit Unicode system
+ max_char_value = '1114111'
+ ulength = str(len(node.value))
+ else:
+ # not really worth implementing a check for surrogate pairs here
+ # drawback: C code can differ when generating on Py2 with 2-byte Unicode
+ pass
else:
ulength = str(len(node.value))
elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
@@ -3260,7 +3260,7 @@ class FormattedValueNode(ExprNode):
self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
if self.c_format_spec is None:
self.value = self.value.coerce_to_pyobject(env)
- if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
+ if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
if self.value.type is unicode_type and not self.value.may_be_none():
# value is definitely a unicode string and we don't format it any special
return self.value
@@ -3390,7 +3390,7 @@ class _IndexingBaseNode(ExprNode):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
- basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
+ basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
@@ -3450,7 +3450,7 @@ class IndexNode(_IndexingBaseNode):
return False
if isinstance(self.index, SliceNode):
# slicing!
- if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
@@ -3562,10 +3562,10 @@ class IndexNode(_IndexingBaseNode):
if index_func is not None:
return index_func.type.return_type
- if is_pythran_expr(base_type) and is_pythran_expr(index_type):
- index_with_type = (self.index, index_type)
- return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
-
+ if is_pythran_expr(base_type) and is_pythran_expr(index_type):
+ index_with_type = (self.index, index_type)
+ return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
+
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always returns their own type on Python indexing/slicing
@@ -3657,14 +3657,14 @@ class IndexNode(_IndexingBaseNode):
def analyse_as_pyobject(self, env, is_slice, getting, setting):
base_type = self.base.type
- if self.index.type.is_unicode_char and base_type is not dict_type:
- # TODO: eventually fold into case below and remove warning, once people have adapted their code
- warning(self.pos,
- "Item lookup of unicode character codes now always converts to a Unicode string. "
- "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
- self.index = self.index.coerce_to_pyobject(env)
- self.is_temp = 1
- elif self.index.type.is_int and base_type is not dict_type:
+ if self.index.type.is_unicode_char and base_type is not dict_type:
+ # TODO: eventually fold into case below and remove warning, once people have adapted their code
+ warning(self.pos,
+ "Item lookup of unicode character codes now always converts to a Unicode string. "
+ "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
+ self.index = self.index.coerce_to_pyobject(env)
+ self.is_temp = 1
+ elif self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
@@ -3691,7 +3691,7 @@ class IndexNode(_IndexingBaseNode):
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
- elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
+ elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
@@ -3753,9 +3753,9 @@ class IndexNode(_IndexingBaseNode):
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
self.type = error_type
- elif self.type_indices is None:
- # Error recorded earlier.
- self.type = error_type
+ elif self.type_indices is None:
+ # Error recorded earlier.
+ self.type = error_type
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
@@ -3792,45 +3792,45 @@ class IndexNode(_IndexingBaseNode):
else:
indices = [self.index]
- base = self.base
- base_type = base.type
+ base = self.base
+ base_type = base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
- if base.is_memview_slice:
- # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
- merged_indices = base.merged_indices(indices)
- if merged_indices is not None:
- base = base.base
- base_type = base.type
- indices = merged_indices
+ if base.is_memview_slice:
+ # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
+ merged_indices = base.merged_indices(indices)
+ if merged_indices is not None:
+ base = base.base
+ base_type = base.type
+ indices = merged_indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
- replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
+ replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
else:
- replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
- elif base_type.is_buffer or base_type.is_pythran_expr:
- if base_type.is_pythran_expr or len(indices) == base_type.ndim:
- # Buffer indexing
- is_buffer_access = True
- indices = [index.analyse_types(env) for index in indices]
- if base_type.is_pythran_expr:
- do_replacement = all(
- index.type.is_int or index.is_slice or index.type.is_pythran_expr
- for index in indices)
- if do_replacement:
- for i,index in enumerate(indices):
- if index.is_slice:
- index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
- index = index.analyse_types(env)
- indices[i] = index
- else:
- do_replacement = all(index.type.is_int for index in indices)
- if do_replacement:
- replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
- # On cloning, indices is cloned. Otherwise, unpack index into indices.
- assert not isinstance(self.index, CloneNode)
+ replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
+ elif base_type.is_buffer or base_type.is_pythran_expr:
+ if base_type.is_pythran_expr or len(indices) == base_type.ndim:
+ # Buffer indexing
+ is_buffer_access = True
+ indices = [index.analyse_types(env) for index in indices]
+ if base_type.is_pythran_expr:
+ do_replacement = all(
+ index.type.is_int or index.is_slice or index.type.is_pythran_expr
+ for index in indices)
+ if do_replacement:
+ for i,index in enumerate(indices):
+ if index.is_slice:
+ index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
+ index = index.analyse_types(env)
+ indices[i] = index
+ else:
+ do_replacement = all(index.type.is_int for index in indices)
+ if do_replacement:
+ replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
+ # On cloning, indices is cloned. Otherwise, unpack index into indices.
+ assert not isinstance(self.index, CloneNode)
if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting)
@@ -3995,8 +3995,8 @@ class IndexNode(_IndexingBaseNode):
if not self.is_temp:
# all handled in self.calculate_result_code()
return
-
- utility_code = None
+
+ utility_code = None
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
@@ -4006,38 +4006,38 @@ class IndexNode(_IndexingBaseNode):
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
- utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
+ utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
- utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
- elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
- # obj[str] is probably doing a dict lookup
- function = "__Pyx_PyObject_Dict_GetItem"
- utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
- else:
- function = "__Pyx_PyObject_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
+ # obj[str] is probably doing a dict lookup
+ function = "__Pyx_PyObject_Dict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ else:
+ function = "__Pyx_PyObject_GetItem"
code.globalstate.use_utility_code(
- TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
- utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
+ TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
- utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
+ utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
- utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
+ utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
- if utility_code is not None:
- code.globalstate.use_utility_code(utility_code)
-
+ if utility_code is not None:
+ code.globalstate.use_utility_code(utility_code)
+
if self.index.type.is_int:
index_code = self.index.result()
else:
@@ -4219,7 +4219,7 @@ class BufferIndexNode(_IndexingBaseNode):
indexing and slicing subclasses
"""
# self.indices are already analyzed
- if not self.base.is_name and not is_pythran_expr(self.base.type):
+ if not self.base.is_name and not is_pythran_expr(self.base.type):
error(self.pos, "Can only index buffer variables")
self.type = error_type
return self
@@ -4238,15 +4238,15 @@ class BufferIndexNode(_IndexingBaseNode):
return self
def analyse_buffer_index(self, env, getting):
- if is_pythran_expr(self.base.type):
- index_with_type_list = [(idx, idx.type) for idx in self.indices]
- self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
- else:
- self.base = self.base.coerce_to_simple(env)
- self.type = self.base.type.dtype
+ if is_pythran_expr(self.base.type):
+ index_with_type_list = [(idx, idx.type) for idx in self.indices]
+ self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
+ else:
+ self.base = self.base.coerce_to_simple(env)
+ self.type = self.base.type.dtype
self.buffer_type = self.base.type
- if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
+ if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
self.is_temp = True
def analyse_assignment(self, rhs):
@@ -4275,24 +4275,24 @@ class BufferIndexNode(_IndexingBaseNode):
base = base.arg
return base.type.get_entry(base)
- def get_index_in_temp(self, code, ivar):
- ret = code.funcstate.allocate_temp(
- PyrexTypes.widest_numeric_type(
- ivar.type,
- PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
- manage_ref=False)
- code.putln("%s = %s;" % (ret, ivar.result()))
- return ret
-
+ def get_index_in_temp(self, code, ivar):
+ ret = code.funcstate.allocate_temp(
+ PyrexTypes.widest_numeric_type(
+ ivar.type,
+ PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
+ manage_ref=False)
+ code.putln("%s = %s;" % (ret, ivar.result()))
+ return ret
+
def buffer_lookup_code(self, code):
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
- if self.in_nogil_context:
- if self.is_buffer_access or self.is_memview_index:
- if code.globalstate.directives['boundscheck']:
- warning(self.pos, "Use boundscheck(False) for faster access", level=1)
-
+ if self.in_nogil_context:
+ if self.is_buffer_access or self.is_memview_index:
+ if code.globalstate.directives['boundscheck']:
+ warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+
# Assign indices to temps of at least (s)size_t to allow further index calculations.
self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
@@ -4322,27 +4322,27 @@ class BufferIndexNode(_IndexingBaseNode):
rhs.free_temps(code)
def generate_buffer_setitem_code(self, rhs, code, op=""):
- base_type = self.base.type
- if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
- obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
- # We have got to do this because we have to declare pythran objects
- # at the beginning of the functions.
- # Indeed, Cython uses "goto" statement for error management, and
- # RAII doesn't work with that kind of construction.
- # Moreover, the way Pythran expressions are made is that they don't
- # support move-assignation easily.
- # This, we explicitly destroy then in-place new objects in this
- # case.
- code.putln("__Pyx_call_destructor(%s);" % obj)
- code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
- code.putln("%s%s %s= %s;" % (
- obj,
- pythran_indexing_code(self.indices),
- op,
- rhs.pythran_result()))
+ base_type = self.base.type
+ if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
+ obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
+ # We have got to do this because we have to declare pythran objects
+ # at the beginning of the functions.
+ # Indeed, Cython uses "goto" statement for error management, and
+ # RAII doesn't work with that kind of construction.
+ # Moreover, the way Pythran expressions are made is that they don't
+ # support move-assignation easily.
+ # This, we explicitly destroy then in-place new objects in this
+ # case.
+ code.putln("__Pyx_call_destructor(%s);" % obj)
+ code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
+ code.putln("%s%s %s= %s;" % (
+ obj,
+ pythran_indexing_code(self.indices),
+ op,
+ rhs.pythran_result()))
code.funcstate.release_temp(obj)
- return
-
+ return
+
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
@@ -4364,15 +4364,15 @@ class BufferIndexNode(_IndexingBaseNode):
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_result_code(self, code):
- if is_pythran_expr(self.base.type):
- res = self.result()
- code.putln("__Pyx_call_destructor(%s);" % res)
- code.putln("new (&%s) decltype(%s){%s%s};" % (
- res,
- res,
- self.base.pythran_result(),
- pythran_indexing_code(self.indices)))
- return
+ if is_pythran_expr(self.base.type):
+ res = self.result()
+ code.putln("__Pyx_call_destructor(%s);" % res)
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ res,
+ res,
+ self.base.pythran_result(),
+ pythran_indexing_code(self.indices)))
+ return
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
@@ -4398,15 +4398,15 @@ class MemoryViewIndexNode(BufferIndexNode):
# memoryviewslice indexing or slicing
from . import MemoryView
- self.is_pythran_mode = has_np_pythran(env)
+ self.is_pythran_mode = has_np_pythran(env)
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
- if not getting:
- self.writable_needed = True
- if self.base.is_name or self.base.is_attribute:
- self.base.entry.type.writable_needed = True
-
+ if not getting:
+ self.writable_needed = True
+ if self.base.is_name or self.base.is_attribute:
+ self.base.entry.type.writable_needed = True
+
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
@@ -4554,37 +4554,37 @@ class MemoryViewSliceNode(MemoryViewIndexNode):
else:
return MemoryCopySlice(self.pos, self)
- def merged_indices(self, indices):
- """Return a new list of indices/slices with 'indices' merged into the current ones
- according to slicing rules.
- Is used to implement "view[i][j]" => "view[i, j]".
- Return None if the indices cannot (easily) be merged at compile time.
- """
- if not indices:
- return None
- # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
- new_indices = self.original_indices[:]
- indices = indices[:]
- for i, s in enumerate(self.original_indices):
- if s.is_slice:
- if s.start.is_none and s.stop.is_none and s.step.is_none:
- # Full slice found, replace by index.
- new_indices[i] = indices[0]
- indices.pop(0)
- if not indices:
- return new_indices
- else:
- # Found something non-trivial, e.g. a partial slice.
- return None
- elif not s.type.is_int:
- # Not a slice, not an integer index => could be anything...
- return None
- if indices:
- if len(new_indices) + len(indices) > self.base.type.ndim:
- return None
- new_indices += indices
- return new_indices
-
+ def merged_indices(self, indices):
+ """Return a new list of indices/slices with 'indices' merged into the current ones
+ according to slicing rules.
+ Is used to implement "view[i][j]" => "view[i, j]".
+ Return None if the indices cannot (easily) be merged at compile time.
+ """
+ if not indices:
+ return None
+ # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
+ new_indices = self.original_indices[:]
+ indices = indices[:]
+ for i, s in enumerate(self.original_indices):
+ if s.is_slice:
+ if s.start.is_none and s.stop.is_none and s.step.is_none:
+ # Full slice found, replace by index.
+ new_indices[i] = indices[0]
+ indices.pop(0)
+ if not indices:
+ return new_indices
+ else:
+ # Found something non-trivial, e.g. a partial slice.
+ return None
+ elif not s.type.is_int:
+ # Not a slice, not an integer index => could be anything...
+ return None
+ if indices:
+ if len(new_indices) + len(indices) > self.base.type.ndim:
+ return None
+ new_indices += indices
+ return new_indices
+
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
@@ -4757,7 +4757,7 @@ class SliceIndexNode(ExprNode):
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
- elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
@@ -4822,13 +4822,13 @@ class SliceIndexNode(ExprNode):
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
- if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
+ if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
- index_node = IndexNode(self.pos, index=index, base=self.base)
+ index_node = IndexNode(self.pos, index=index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
@@ -5296,61 +5296,61 @@ class SliceNode(ExprNode):
if self.is_literal:
code.put_giveref(self.py_result())
-class SliceIntNode(SliceNode):
- # start:stop:step in subscript list
- # This is just a node to hold start,stop and step nodes that can be
- # converted to integers. This does not generate a slice python object.
- #
- # start ExprNode
- # stop ExprNode
- # step ExprNode
-
- is_temp = 0
-
- def calculate_constant_result(self):
- self.constant_result = slice(
- self.start.constant_result,
- self.stop.constant_result,
- self.step.constant_result)
-
- def compile_time_value(self, denv):
- start = self.start.compile_time_value(denv)
- stop = self.stop.compile_time_value(denv)
- step = self.step.compile_time_value(denv)
- try:
- return slice(start, stop, step)
- except Exception as e:
- self.compile_time_value_error(e)
-
- def may_be_none(self):
- return False
-
- def analyse_types(self, env):
- self.start = self.start.analyse_types(env)
- self.stop = self.stop.analyse_types(env)
- self.step = self.step.analyse_types(env)
-
- if not self.start.is_none:
- self.start = self.start.coerce_to_integer(env)
- if not self.stop.is_none:
- self.stop = self.stop.coerce_to_integer(env)
- if not self.step.is_none:
- self.step = self.step.coerce_to_integer(env)
-
- if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
- self.is_literal = True
- self.is_temp = False
- return self
-
- def calculate_result_code(self):
- pass
-
- def generate_result_code(self, code):
- for a in self.start,self.stop,self.step:
- if isinstance(a, CloneNode):
- a.arg.result()
-
-
+class SliceIntNode(SliceNode):
+ # start:stop:step in subscript list
+ # This is just a node to hold start,stop and step nodes that can be
+ # converted to integers. This does not generate a slice python object.
+ #
+ # start ExprNode
+ # stop ExprNode
+ # step ExprNode
+
+ is_temp = 0
+
+ def calculate_constant_result(self):
+ self.constant_result = slice(
+ self.start.constant_result,
+ self.stop.constant_result,
+ self.step.constant_result)
+
+ def compile_time_value(self, denv):
+ start = self.start.compile_time_value(denv)
+ stop = self.stop.compile_time_value(denv)
+ step = self.step.compile_time_value(denv)
+ try:
+ return slice(start, stop, step)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ self.start = self.start.analyse_types(env)
+ self.stop = self.stop.analyse_types(env)
+ self.step = self.step.analyse_types(env)
+
+ if not self.start.is_none:
+ self.start = self.start.coerce_to_integer(env)
+ if not self.stop.is_none:
+ self.stop = self.stop.coerce_to_integer(env)
+ if not self.step.is_none:
+ self.step = self.step.coerce_to_integer(env)
+
+ if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+ self.is_literal = True
+ self.is_temp = False
+ return self
+
+ def calculate_result_code(self):
+ pass
+
+ def generate_result_code(self, code):
+ for a in self.start,self.stop,self.step:
+ if isinstance(a, CloneNode):
+ a.arg.result()
+
+
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
@@ -5418,32 +5418,32 @@ class CallNode(ExprNode):
return False
return ExprNode.may_be_none(self)
- def set_py_result_type(self, function, func_type=None):
- if func_type is None:
- func_type = function.type
- if func_type is Builtin.type_type and (
- function.is_name and
- function.entry and
- function.entry.is_builtin and
- function.entry.name in Builtin.types_that_construct_their_instance):
- # calling a builtin type that returns a specific object type
- if function.entry.name == 'float':
- # the following will come true later on in a transform
- self.type = PyrexTypes.c_double_type
- self.result_ctype = PyrexTypes.c_double_type
- else:
- self.type = Builtin.builtin_types[function.entry.name]
- self.result_ctype = py_object_type
- self.may_return_none = False
- elif function.is_name and function.type_entry:
- # We are calling an extension type constructor. As long as we do not
- # support __new__(), the result type is clear
- self.type = function.type_entry.type
- self.result_ctype = py_object_type
- self.may_return_none = False
- else:
- self.type = py_object_type
-
+ def set_py_result_type(self, function, func_type=None):
+ if func_type is None:
+ func_type = function.type
+ if func_type is Builtin.type_type and (
+ function.is_name and
+ function.entry and
+ function.entry.is_builtin and
+ function.entry.name in Builtin.types_that_construct_their_instance):
+ # calling a builtin type that returns a specific object type
+ if function.entry.name == 'float':
+ # the following will come true later on in a transform
+ self.type = PyrexTypes.c_double_type
+ self.result_ctype = PyrexTypes.c_double_type
+ else:
+ self.type = Builtin.builtin_types[function.entry.name]
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ elif function.is_name and function.type_entry:
+ # We are calling an extension type constructor. As long as we do not
+ # support __new__(), the result type is clear
+ self.type = function.type_entry.type
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ else:
+ self.type = py_object_type
+
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
@@ -5461,10 +5461,10 @@ class CallNode(ExprNode):
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
- if not constructor:
- error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
- self.type = error_type
- return self
+ if not constructor:
+ error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
+ self.type = error_type
+ return self
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.empty_declaration_code())
@@ -5506,7 +5506,7 @@ class SimpleCallNode(CallNode):
has_optional_args = False
nogil = False
analysed = False
- overflowcheck = False
+ overflowcheck = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
@@ -5527,11 +5527,11 @@ class SimpleCallNode(CallNode):
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
- elif attr == 'typeof':
- if len(self.args) != 1:
- error(self.args.pos, "only one type allowed.")
- operand = self.args[0].analyse_types(env)
- return operand.type
+ elif attr == 'typeof':
+ if len(self.args) != 1:
+ error(self.args.pos, "only one type allowed.")
+ operand = self.args[0].analyse_types(env)
+ return operand.type
def explicit_args_kwds(self):
return self.args, None
@@ -5553,28 +5553,28 @@ class SimpleCallNode(CallNode):
function.obj = CloneNode(self.self)
func_type = self.function_type()
- self.is_numpy_call_with_exprs = False
+ self.is_numpy_call_with_exprs = False
if (has_np_pythran(env) and function.is_numpy_attribute and
pythran_is_numpy_func_supported(function)):
- has_pythran_args = True
+ has_pythran_args = True
self.arg_tuple = TupleNode(self.pos, args = self.args)
- self.arg_tuple = self.arg_tuple.analyse_types(env)
- for arg in self.arg_tuple.args:
- has_pythran_args &= is_pythran_supported_node_or_none(arg)
- self.is_numpy_call_with_exprs = bool(has_pythran_args)
- if self.is_numpy_call_with_exprs:
+ self.arg_tuple = self.arg_tuple.analyse_types(env)
+ for arg in self.arg_tuple.args:
+ has_pythran_args &= is_pythran_supported_node_or_none(arg)
+ self.is_numpy_call_with_exprs = bool(has_pythran_args)
+ if self.is_numpy_call_with_exprs:
env.add_include_file(pythran_get_func_include_file(function))
- return NumPyMethodCallNode.from_node(
- self,
+ return NumPyMethodCallNode.from_node(
+ self,
function_cname=pythran_functor(function),
- arg_tuple=self.arg_tuple,
+ arg_tuple=self.arg_tuple,
type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)),
- )
- elif func_type.is_pyobject:
- self.arg_tuple = TupleNode(self.pos, args = self.args)
+ )
+ elif func_type.is_pyobject:
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
- self.set_py_result_type(function, func_type)
+ self.set_py_result_type(function, func_type)
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
@@ -5669,7 +5669,7 @@ class SimpleCallNode(CallNode):
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
- "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
error='PyExc_AttributeError',
format_args=[entry.name])
else:
@@ -5801,8 +5801,8 @@ class SimpleCallNode(CallNode):
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
- self.overflowcheck = env.directives['overflowcheck']
-
+ self.overflowcheck = env.directives['overflowcheck']
+
def calculate_result_code(self):
return self.c_call_code()
@@ -5842,11 +5842,11 @@ class SimpleCallNode(CallNode):
return False # skip allocation of unused result temp
return True
- def generate_evaluation_code(self, code):
- function = self.function
- if function.is_name or function.is_attribute:
- code.globalstate.use_entry_utility_code(function.entry)
-
+ def generate_evaluation_code(self, code):
+ function = self.function
+ if function.is_name or function.is_attribute:
+ code.globalstate.use_entry_utility_code(function.entry)
+
abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong')
is_signed_int = self.type.is_int and self.type.signed
if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames:
@@ -5858,59 +5858,59 @@ class SimpleCallNode(CallNode):
self.args[0].type.empty_declaration_code(),
code.error_goto(self.pos)))
- if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
- self.arg_tuple.args and self.arg_tuple.is_literal):
- super(SimpleCallNode, self).generate_evaluation_code(code)
- return
-
- # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
- arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
- subexprs = (self.self, self.coerced_self, function, arg)
- for subexpr in subexprs:
- if subexpr is not None:
- subexpr.generate_evaluation_code(code)
-
- code.mark_pos(self.pos)
- assert self.is_temp
- self.allocate_temp_result(code)
-
- if arg is None:
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCallNoArg", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
- self.result(),
- function.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
- else:
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCallOneArg", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
- self.result(),
- function.py_result(),
- arg.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
-
- code.put_gotref(self.py_result())
-
- for subexpr in subexprs:
- if subexpr is not None:
- subexpr.generate_disposal_code(code)
- subexpr.free_temps(code)
-
+ if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
+ self.arg_tuple.args and self.arg_tuple.is_literal):
+ super(SimpleCallNode, self).generate_evaluation_code(code)
+ return
+
+ # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
+ arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
+ subexprs = (self.self, self.coerced_self, function, arg)
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ assert self.is_temp
+ self.allocate_temp_result(code)
+
+ if arg is None:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallNoArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
+ self.result(),
+ function.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
+ self.result(),
+ function.py_result(),
+ arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_disposal_code(code)
+ subexpr.free_temps(code)
+
def generate_result_code(self, code):
func_type = self.function_type()
if func_type.is_pyobject:
- arg_code = self.arg_tuple.py_result()
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCall", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
- self.result(),
- self.function.py_result(),
- arg_code,
- code.error_goto_if_null(self.result(), self.pos)))
+ arg_code = self.arg_tuple.py_result()
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ self.function.py_result(),
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
@@ -5938,7 +5938,7 @@ class SimpleCallNode(CallNode):
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
- exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
+ exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
@@ -5972,33 +5972,33 @@ class SimpleCallNode(CallNode):
class NumPyMethodCallNode(ExprNode):
- # Pythran call to a NumPy function or method.
- #
+ # Pythran call to a NumPy function or method.
+ #
# function_cname string the function/method to call
# arg_tuple TupleNode the arguments as an args tuple
-
+
subexprs = ['arg_tuple']
- is_temp = True
- may_return_none = True
-
- def generate_evaluation_code(self, code):
- code.mark_pos(self.pos)
- self.allocate_temp_result(code)
-
- assert self.arg_tuple.mult_factor is None
- args = self.arg_tuple.args
- for arg in args:
- arg.generate_evaluation_code(code)
-
- code.putln("// function evaluation code for numpy function")
- code.putln("__Pyx_call_destructor(%s);" % self.result())
+ is_temp = True
+ may_return_none = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ code.putln("// function evaluation code for numpy function")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s{}(%s)};" % (
- self.result(),
- self.result(),
+ self.result(),
+ self.result(),
self.function_cname,
- ", ".join(a.pythran_result() for a in args)))
-
-
+ ", ".join(a.pythran_result() for a in args)))
+
+
class PyMethodCallNode(SimpleCallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
@@ -6323,37 +6323,37 @@ class PythonCapiCallNode(SimpleCallNode):
SimpleCallNode.__init__(self, pos, **kwargs)
-class CachedBuiltinMethodCallNode(CallNode):
- # Python call to a method of a known Python builtin (only created in transforms)
-
- subexprs = ['obj', 'args']
- is_temp = True
-
- def __init__(self, call_node, obj, method_name, args):
- super(CachedBuiltinMethodCallNode, self).__init__(
- call_node.pos,
- obj=obj, method_name=method_name, args=args,
- may_return_none=call_node.may_return_none,
- type=call_node.type)
-
- def may_be_none(self):
- if self.may_return_none is not None:
- return self.may_return_none
- return ExprNode.may_be_none(self)
-
- def generate_result_code(self, code):
- type_cname = self.obj.type.cname
- obj_cname = self.obj.py_result()
- args = [arg.py_result() for arg in self.args]
- call_code = code.globalstate.cached_unbound_method_call_code(
- obj_cname, type_cname, self.method_name, args)
- code.putln("%s = %s; %s" % (
- self.result(), call_code,
- code.error_goto_if_null(self.result(), self.pos)
- ))
- code.put_gotref(self.result())
-
-
+class CachedBuiltinMethodCallNode(CallNode):
+ # Python call to a method of a known Python builtin (only created in transforms)
+
+ subexprs = ['obj', 'args']
+ is_temp = True
+
+ def __init__(self, call_node, obj, method_name, args):
+ super(CachedBuiltinMethodCallNode, self).__init__(
+ call_node.pos,
+ obj=obj, method_name=method_name, args=args,
+ may_return_none=call_node.may_return_none,
+ type=call_node.type)
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ return ExprNode.may_be_none(self)
+
+ def generate_result_code(self, code):
+ type_cname = self.obj.type.cname
+ obj_cname = self.obj.py_result()
+ args = [arg.py_result() for arg in self.args]
+ call_code = code.globalstate.cached_unbound_method_call_code(
+ obj_cname, type_cname, self.method_name, args)
+ code.putln("%s = %s; %s" % (
+ self.result(), call_code,
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ code.put_gotref(self.result())
+
+
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
@@ -6412,7 +6412,7 @@ class GeneralCallNode(CallNode):
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
- self.set_py_result_type(self.function)
+ self.set_py_result_type(self.function)
self.is_temp = 1
return self
@@ -6579,7 +6579,7 @@ class AsTupleNode(ExprNode):
# arg ExprNode
subexprs = ['arg']
- is_temp = 1
+ is_temp = 1
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
@@ -6605,11 +6605,11 @@ class AsTupleNode(ExprNode):
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
- cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
+ cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
code.putln(
- "%s = %s(%s); %s" % (
+ "%s = %s(%s); %s" % (
self.result(),
- cfunc, self.arg.py_result(),
+ cfunc, self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
@@ -6900,9 +6900,9 @@ class AttributeNode(ExprNode):
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
- if self.is_cimported_module_without_shadow(env):
- error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
- return self
+ if self.is_cimported_module_without_shadow(env):
+ error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
+ return self
return None
def analyse_as_type_attribute(self, env):
@@ -7094,14 +7094,14 @@ class AttributeNode(ExprNode):
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
-
+
if not obj_type.is_pyobject and not obj_type.is_error:
- # Expose python methods for immutable objects.
- if (obj_type.is_string or obj_type.is_cpp_string
- or obj_type.is_buffer or obj_type.is_memoryviewslice
- or obj_type.is_numeric
- or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
- or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
+ # Expose python methods for immutable objects.
+ if (obj_type.is_string or obj_type.is_cpp_string
+ or obj_type.is_buffer or obj_type.is_memoryviewslice
+ or obj_type.is_numeric
+ or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
+ or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
@@ -7123,7 +7123,7 @@ class AttributeNode(ExprNode):
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
- msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
+ msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
@@ -7145,9 +7145,9 @@ class AttributeNode(ExprNode):
gil_message = "Accessing Python attribute"
- def is_cimported_module_without_shadow(self, env):
- return self.obj.is_cimported_module_without_shadow(env)
-
+ def is_cimported_module_without_shadow(self, env):
+ return self.obj.is_cimported_module_without_shadow(env)
+
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
@@ -7674,14 +7674,14 @@ class SequenceNode(ExprNode):
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
- code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
+ code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
- # < 0 => exception
+ # < 0 => exception
code.putln(code.error_goto(self.pos))
code.putln("}")
@@ -7912,10 +7912,10 @@ class TupleNode(SequenceNode):
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
- if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
- for type in arg_types):
+ if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
+ for type in arg_types):
return tuple_type
- return env.declare_tuple_type(self.pos, arg_types).type
+ return env.declare_tuple_type(self.pos, arg_types).type
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
@@ -7929,8 +7929,8 @@ class TupleNode(SequenceNode):
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
- not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
- for arg in self.args)):
+ not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
+ for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
@@ -8013,8 +8013,8 @@ class TupleNode(SequenceNode):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
-
- if self.is_literal or self.is_partly_literal:
+
+ if self.is_literal or self.is_partly_literal:
# The "mult_factor" is part of the deduplication if it is also constant, i.e. when
# we deduplicate the multiplied result. Otherwise, only deduplicate the constant part.
dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args)
@@ -8025,14 +8025,14 @@ class TupleNode(SequenceNode):
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
const_code.put_giveref(tuple_target)
- if self.is_literal:
- self.result_code = tuple_target
- else:
- code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
- self.result(), tuple_target, self.mult_factor.py_result(),
- code.error_goto_if_null(self.result(), self.pos)
+ if self.is_literal:
+ self.result_code = tuple_target
+ else:
+ code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)
))
- code.put_gotref(self.py_result())
+ code.put_gotref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
@@ -8065,10 +8065,10 @@ class ListNode(SequenceNode):
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
- with local_errors(ignore=True) as errors:
- self.original_args = list(self.args)
- node = SequenceNode.analyse_types(self, env)
- node.obj_conversion_errors = errors
+ with local_errors(ignore=True) as errors:
+ self.original_args = list(self.args)
+ node = SequenceNode.analyse_types(self, env)
+ node.obj_conversion_errors = errors
if env.is_module_scope:
self.in_module_scope = True
node = node._create_merge_node_if_necessary(env)
@@ -8244,7 +8244,7 @@ class ScopedExprNode(ExprNode):
code.putln('{ /* enter inner scope */')
py_entries = []
- for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
+ for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
if not entry.in_closure:
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
@@ -8255,13 +8255,13 @@ class ScopedExprNode(ExprNode):
return
# must free all local Python references at each exit point
- old_loop_labels = code.new_loop_labels()
+ old_loop_labels = code.new_loop_labels()
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
- self._generate_vars_cleanup(code, py_entries)
+ self._generate_vars_cleanup(code, py_entries)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
@@ -8270,7 +8270,7 @@ class ScopedExprNode(ExprNode):
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
- self._generate_vars_cleanup(code, py_entries)
+ self._generate_vars_cleanup(code, py_entries)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
@@ -8278,22 +8278,22 @@ class ScopedExprNode(ExprNode):
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
- def _generate_vars_cleanup(self, code, py_entries):
- for entry in py_entries:
- if entry.is_cglobal:
- code.put_var_gotref(entry)
- code.put_decref_set(entry.cname, "Py_None")
- else:
- code.put_var_xdecref_clear(entry)
-
+ def _generate_vars_cleanup(self, code, py_entries):
+ for entry in py_entries:
+ if entry.is_cglobal:
+ code.put_var_gotref(entry)
+ code.put_decref_set(entry.cname, "Py_None")
+ else:
+ code.put_var_xdecref_clear(entry)
+
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
child_attrs = ["loop"]
is_temp = True
- constant_result = not_a_constant
+ constant_result = not_a_constant
def infer_type(self, env):
return self.type
@@ -8721,12 +8721,12 @@ class DictNode(ExprNode):
return dict_type
def analyse_types(self, env):
- with local_errors(ignore=True) as errors:
- self.key_value_pairs = [
- item.analyse_types(env)
- for item in self.key_value_pairs
- ]
- self.obj_conversion_errors = errors
+ with local_errors(ignore=True) as errors:
+ self.key_value_pairs = [
+ item.analyse_types(env)
+ for item in self.key_value_pairs
+ ]
+ self.obj_conversion_errors = errors
return self
def may_be_none(self):
@@ -8788,9 +8788,9 @@ class DictNode(ExprNode):
if is_dict:
self.release_errors()
code.putln(
- "%s = __Pyx_PyDict_NewPresized(%d); %s" % (
+ "%s = __Pyx_PyDict_NewPresized(%d); %s" % (
self.result(),
- len(self.key_value_pairs),
+ len(self.key_value_pairs),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
@@ -9262,19 +9262,19 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
else:
default_args.append(arg)
if arg.annotation:
- arg.annotation = self.analyse_annotation(env, arg.annotation)
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
if arg and arg.annotation:
- arg.annotation = self.analyse_annotation(env, arg.annotation)
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
- annotation = self.def_node.return_type_annotation
- if annotation:
- annotation = self.analyse_annotation(env, annotation)
- self.def_node.return_type_annotation = annotation
- annotations.append((annotation.pos, StringEncoding.EncodedString("return"), annotation))
+ annotation = self.def_node.return_type_annotation
+ if annotation:
+ annotation = self.analyse_annotation(env, annotation)
+ self.def_node.return_type_annotation = annotation
+ annotations.append((annotation.pos, StringEncoding.EncodedString("return"), annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
@@ -9289,7 +9289,7 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
- allow_pyobject=False, allow_memoryview=True)
+ allow_pyobject=False, allow_memoryview=True)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
@@ -9351,20 +9351,20 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
- def analyse_annotation(self, env, annotation):
- if annotation is None:
- return None
- atype = annotation.analyse_as_type(env)
- if atype is not None:
- # Keep parsed types as strings as they might not be Python representable.
- annotation = UnicodeNode(
- annotation.pos,
- value=StringEncoding.EncodedString(atype.declaration_code('', for_display=True)))
- annotation = annotation.analyse_types(env)
- if not annotation.type.is_pyobject:
- annotation = annotation.coerce_to_pyobject(env)
- return annotation
-
+ def analyse_annotation(self, env, annotation):
+ if annotation is None:
+ return None
+ atype = annotation.analyse_as_type(env)
+ if atype is not None:
+ # Keep parsed types as strings as they might not be Python representable.
+ annotation = UnicodeNode(
+ annotation.pos,
+ value=StringEncoding.EncodedString(atype.declaration_code('', for_display=True)))
+ annotation = annotation.analyse_types(env)
+ if not annotation.type.is_pyobject:
+ annotation = annotation.coerce_to_pyobject(env)
+ return annotation
+
def may_be_none(self):
return False
@@ -9536,13 +9536,13 @@ class CodeObjectNode(ExprNode):
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
- # XXX Use get_description() to set arcadia root relative filename
- file_path = StringEncoding.bytes_literal(func.pos[0].get_description().encode('utf8'), 'utf8')
+ # XXX Use get_description() to set arcadia root relative filename
+ file_path = StringEncoding.bytes_literal(func.pos[0].get_description().encode('utf8'), 'utf8')
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
- # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
- flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
-
+ # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
+ flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
+
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
@@ -9729,11 +9729,11 @@ class YieldExprNode(ExprNode):
label_num = 0
is_yield_from = False
is_await = False
- in_async_gen = False
+ in_async_gen = False
expr_keyword = 'yield'
def analyse_types(self, env):
- if not self.label_num or (self.is_yield_from and self.in_async_gen):
+ if not self.label_num or (self.is_yield_from and self.in_async_gen):
error(self.pos, "'%s' not supported here" % self.expr_keyword)
self.is_temp = 1
if self.arg is not None:
@@ -9764,8 +9764,8 @@ class YieldExprNode(ExprNode):
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
- label_num, label_name = code.new_yield_label(
- self.expr_keyword.replace(' ', '_'))
+ label_num, label_name = code.new_yield_label(
+ self.expr_keyword.replace(' ', '_'))
code.use_label(label_name)
saved = []
@@ -9778,30 +9778,30 @@ class YieldExprNode(ExprNode):
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
- profile = code.globalstate.directives['profile']
- linetrace = code.globalstate.directives['linetrace']
- if profile or linetrace:
- code.put_trace_return(Naming.retval_cname,
- nogil=not code.funcstate.gil_owned)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
-
- if code.funcstate.current_except is not None:
- # inside of an except block => save away currently handled exception
- code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
- else:
- # no exceptions being handled => restore exception state of caller
- code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
-
- code.putln("/* return from %sgenerator, %sing value */" % (
- 'async ' if self.in_async_gen else '',
- 'await' if self.is_await else 'yield'))
+
+ if code.funcstate.current_except is not None:
+ # inside of an except block => save away currently handled exception
+ code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
+ else:
+ # no exceptions being handled => restore exception state of caller
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+
+ code.putln("/* return from %sgenerator, %sing value */" % (
+ 'async ' if self.in_async_gen else '',
+ 'await' if self.is_await else 'yield'))
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
- if self.in_async_gen and not self.is_await:
- # __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
- code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
- else:
- code.putln("return %s;" % Naming.retval_cname)
+ if self.in_async_gen and not self.is_await:
+ # __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
+ code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
+ else:
+ code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
@@ -9809,19 +9809,19 @@ class YieldExprNode(ExprNode):
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
- self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
+ self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
- def generate_sent_value_handling_code(self, code, value_cname):
- code.putln(code.error_goto_if_null(value_cname, self.pos))
+ def generate_sent_value_handling_code(self, code, value_cname):
+ code.putln(code.error_goto_if_null(value_cname, self.pos))
-class _YieldDelegationExprNode(YieldExprNode):
+class _YieldDelegationExprNode(YieldExprNode):
def yield_from_func(self, code):
- raise NotImplementedError()
+ raise NotImplementedError()
def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
if source_cname is None:
@@ -9855,31 +9855,31 @@ class _YieldDelegationExprNode(YieldExprNode):
code.put_gotref(self.result())
def handle_iteration_exception(self, code):
- code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
+ code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
code.putln("if (exc_type) {")
- code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
- " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
+ code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
-class YieldFromExprNode(_YieldDelegationExprNode):
- # "yield from GEN" expression
- is_yield_from = True
- expr_keyword = 'yield from'
-
- def coerce_yield_argument(self, env):
- if not self.arg.type.is_string:
- # FIXME: support C arrays and C++ iterators?
- error(self.pos, "yielding from non-Python object not supported")
- self.arg = self.arg.coerce_to_pyobject(env)
-
- def yield_from_func(self, code):
- code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
- return "__Pyx_Generator_Yield_From"
-
-
-class AwaitExprNode(_YieldDelegationExprNode):
+class YieldFromExprNode(_YieldDelegationExprNode):
+ # "yield from GEN" expression
+ is_yield_from = True
+ expr_keyword = 'yield from'
+
+ def coerce_yield_argument(self, env):
+ if not self.arg.type.is_string:
+ # FIXME: support C arrays and C++ iterators?
+ error(self.pos, "yielding from non-Python object not supported")
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def yield_from_func(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
+ return "__Pyx_Generator_Yield_From"
+
+
+class AwaitExprNode(_YieldDelegationExprNode):
# 'await' expression node
#
# arg ExprNode the Awaitable value to await
@@ -9903,30 +9903,30 @@ class AwaitIterNextExprNode(AwaitExprNode):
#
# Breaks out of loop on StopAsyncIteration exception.
- def _generate_break(self, code):
+ def _generate_break(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
- code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
- code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
- " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
- " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
+ code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
+ code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
+ " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
code.putln("PyErr_Clear();")
code.putln("break;")
code.putln("}")
-
- def fetch_iteration_result(self, code):
- assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
- self._generate_break(code)
+
+ def fetch_iteration_result(self, code):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ self._generate_break(code)
super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
- def generate_sent_value_handling_code(self, code, value_cname):
- assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
- code.putln("if (unlikely(!%s)) {" % value_cname)
- self._generate_break(code)
- # all non-break exceptions are errors, as in parent class
- code.putln(code.error_goto(self.pos))
- code.putln("}")
-
+ def generate_sent_value_handling_code(self, code, value_cname):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ code.putln("if (unlikely(!%s)) {" % value_cname)
+ self._generate_break(code)
+ # all non-break exceptions are errors, as in parent class
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
@@ -10071,10 +10071,10 @@ class UnopNode(ExprNode):
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
- if self.is_pythran_operation(env):
- self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
- self.is_temp = 1
- elif self.is_py_operation():
+ if self.is_pythran_operation(env):
+ self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
+ self.is_temp = 1
+ elif self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
@@ -10090,11 +10090,11 @@ class UnopNode(ExprNode):
def is_py_operation(self):
return self.operand.type.is_pyobject or self.operand.type.is_ctuple
- def is_pythran_operation(self, env):
- np_pythran = has_np_pythran(env)
- op_type = self.operand.type
- return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
-
+ def is_pythran_operation(self, env):
+ np_pythran = has_np_pythran(env)
+ op_type = self.operand.type
+ return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
+
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
@@ -10107,15 +10107,15 @@ class UnopNode(ExprNode):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
- if self.type.is_pythran_expr:
- code.putln("// Pythran unaryop")
- code.putln("__Pyx_call_destructor(%s);" % self.result())
- code.putln("new (&%s) decltype(%s){%s%s};" % (
- self.result(),
- self.result(),
- self.operator,
- self.operand.pythran_result()))
- elif self.operand.type.is_pyobject:
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran unaryop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ self.result(),
+ self.result(),
+ self.operator,
+ self.operand.pythran_result()))
+ elif self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
elif self.is_temp:
if self.is_cpp_operation() and self.exception_check == '+':
@@ -10337,10 +10337,10 @@ class AmpersandNode(CUnopNode):
self.error("Taking address of non-lvalue (type %s)" % argtype)
return self
if argtype.is_pyobject:
- self.error("Cannot take address of Python %s" % (
- "variable '%s'" % self.operand.name if self.operand.is_name else
- "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
- "object"))
+ self.error("Cannot take address of Python %s" % (
+ "variable '%s'" % self.operand.name if self.operand.is_name else
+ "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
+ "object"))
return self
if not argtype.is_cpp_class or not self.type:
self.type = PyrexTypes.c_ptr_type(argtype)
@@ -10675,7 +10675,7 @@ class CythonArrayNode(ExprNode):
def allocate_temp_result(self, code):
if self.temp_code:
- raise RuntimeError("temp allocated multiple times")
+ raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
@@ -10683,9 +10683,9 @@ class CythonArrayNode(ExprNode):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
- cython_scope = env.global_scope().context.cython_scope
- cython_scope.load_cythonscope()
- return cython_scope.viewscope.lookup("array").type
+ cython_scope = env.global_scope().context.cython_scope
+ cython_scope.load_cythonscope()
+ return cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
from . import Buffer
@@ -10794,7 +10794,7 @@ class SizeofTypeNode(SizeofNode):
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
- node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
+ node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
@@ -10942,10 +10942,10 @@ class TypeofNode(ExprNode):
self.literal = literal.coerce_to_pyobject(env)
return self
- def analyse_as_type(self, env):
- self.operand = self.operand.analyse_types(env)
- return self.operand.type
-
+ def analyse_as_type(self, env):
+ self.operand = self.operand.analyse_types(env)
+ return self.operand.type
+
def may_be_none(self):
return False
@@ -11039,7 +11039,7 @@ class BinopNode(ExprNode):
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
- self.operand2.infer_type(env), env)
+ self.operand2.infer_type(env), env)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
@@ -11048,15 +11048,15 @@ class BinopNode(ExprNode):
return self
def analyse_operation(self, env):
- if self.is_pythran_operation(env):
- self.type = self.result_type(self.operand1.type,
- self.operand2.type, env)
- assert self.type.is_pythran_expr
- self.is_temp = 1
- elif self.is_py_operation():
+ if self.is_pythran_operation(env):
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type, env)
+ assert self.type.is_pythran_expr
+ self.is_temp = 1
+ elif self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
- self.operand2.type, env)
+ self.operand2.type, env)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
@@ -11070,15 +11070,15 @@ class BinopNode(ExprNode):
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
- def is_pythran_operation(self, env):
- return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
-
- def is_pythran_operation_types(self, type1, type2, env):
- # Support only expr op supported_type, or supported_type op expr
- return has_np_pythran(env) and \
- (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
- (is_pythran_expr(type1) or is_pythran_expr(type2))
-
+ def is_pythran_operation(self, env):
+ return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
+
+ def is_pythran_operation_types(self, type1, type2, env):
+ # Support only expr op supported_type, or supported_type op expr
+ return has_np_pythran(env) and \
+ (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
+ (is_pythran_expr(type1) or is_pythran_expr(type2))
+
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
@@ -11106,9 +11106,9 @@ class BinopNode(ExprNode):
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
- def result_type(self, type1, type2, env):
- if self.is_pythran_operation_types(type1, type2, env):
- return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ def result_type(self, type1, type2, env):
+ if self.is_pythran_operation_types(type1, type2, env):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
@@ -11126,8 +11126,8 @@ class BinopNode(ExprNode):
if result_type is not None:
return result_type
return py_object_type
- elif type1.is_error or type2.is_error:
- return PyrexTypes.error_type
+ elif type1.is_error or type2.is_error:
+ return PyrexTypes.error_type
else:
return self.compute_c_result_type(type1, type2)
@@ -11150,9 +11150,9 @@ class BinopNode(ExprNode):
self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code):
- if self.type.is_pythran_expr:
- code.putln("// Pythran binop")
- code.putln("__Pyx_call_destructor(%s);" % self.result())
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran binop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
if self.operator == '**':
code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % (
self.result(),
@@ -11166,7 +11166,7 @@ class BinopNode(ExprNode):
self.operand1.pythran_result(),
self.operator,
self.operand2.pythran_result()))
- elif self.operand1.type.is_pyobject:
+ elif self.operand1.type.is_pyobject:
function = self.py_operation_function(code)
if self.operator == '**':
extra_args = ", Py_None"
@@ -11328,11 +11328,11 @@ class NumBinopNode(BinopNode):
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.type.is_cpp_class or self.infix:
- if is_pythran_expr(self.type):
- result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
- else:
- result1, result2 = self.operand1.result(), self.operand2.result()
- return "(%s %s %s)" % (result1, self.operator, result2)
+ if is_pythran_expr(self.type):
+ result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
+ else:
+ result1, result2 = self.operand1.result(), self.operand2.result()
+ return "(%s %s %s)" % (result1, self.operator, result2)
else:
func = self.type.binary_op(self.operator)
if func is None:
@@ -11398,7 +11398,7 @@ class AddNode(NumBinopNode):
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
- string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
@@ -11462,7 +11462,7 @@ class MulNode(NumBinopNode):
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
- string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
@@ -11532,7 +11532,7 @@ class DivNode(NumBinopNode):
self._check_truedivision(env)
return self.result_type(
self.operand1.infer_type(env),
- self.operand2.infer_type(env), env)
+ self.operand2.infer_type(env), env)
def analyse_operation(self, env):
self._check_truedivision(env)
@@ -11663,20 +11663,20 @@ class DivNode(NumBinopNode):
self.operand2.result())
-_find_formatting_types = re.compile(
- br"%"
- br"(?:%|" # %%
- br"(?:\([^)]+\))?" # %(name)
- br"[-+#,0-9 ]*([a-z])" # %.2f etc.
- br")").findall
-
-# These format conversion types can never trigger a Unicode string conversion in Py2.
-_safe_bytes_formats = set([
- # Excludes 's' and 'r', which can generate non-bytes strings.
- b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
-])
-
-
+_find_formatting_types = re.compile(
+ br"%"
+ br"(?:%|" # %%
+ br"(?:\([^)]+\))?" # %(name)
+ br"[-+#,0-9 ]*([a-z])" # %.2f etc.
+ br")").findall
+
+# These format conversion types can never trigger a Unicode string conversion in Py2.
+_safe_bytes_formats = set([
+ # Excludes 's' and 'r', which can generate non-bytes strings.
+ b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
+])
+
+
class ModNode(DivNode):
# '%' operator.
@@ -11686,7 +11686,7 @@ class ModNode(DivNode):
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
- # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3's.
+ # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3's.
if type1 is unicode_type:
# None + xyz may be implemented by RHS
if type2.is_builtin_type or not self.operand1.may_be_none():
@@ -11696,11 +11696,11 @@ class ModNode(DivNode):
return type2
elif type2.is_numeric:
return type1
- elif self.operand1.is_string_literal:
- if type1 is str_type or type1 is bytes_type:
- if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
- return type1
- return basestring_type
+ elif self.operand1.is_string_literal:
+ if type1 is str_type or type1 is bytes_type:
+ if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
+ return type1
+ return basestring_type
elif type1 is bytes_type and not type2.is_builtin_type:
return None # RHS might implement '% operator differently in Py3
else:
@@ -11905,7 +11905,7 @@ class BoolBinopNode(ExprNode):
operator=self.operator,
operand1=operand1, operand2=operand2)
- def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
@@ -11914,20 +11914,20 @@ class BoolBinopNode(ExprNode):
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
- code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
- code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
- result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
+ result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
- self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
+ self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
code.put_label(end_label)
gil_message = "Truth-testing Python object"
@@ -12012,7 +12012,7 @@ class BoolBinopResultNode(ExprNode):
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
- def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
@@ -12055,7 +12055,7 @@ class BoolBinopResultNode(ExprNode):
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
- code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
+ code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
@@ -12275,22 +12275,22 @@ class CmpNode(object):
new_common_type = None
# catch general errors
- if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
- type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
+ if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
+ type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
- if (op not in ('==', '!=')
- and (type1.is_complex or type1.is_numeric)
- and (type2.is_complex or type2.is_numeric)):
+ if (op not in ('==', '!=')
+ and (type1.is_complex or type1.is_numeric)
+ and (type2.is_complex or type2.is_numeric)):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
- new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
+ new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
elif type2.is_pyobject:
- new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
+ new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
@@ -12416,11 +12416,11 @@ class CmpNode(object):
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
- elif self.operand2.type is Builtin.set_type:
- self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
- self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
- self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
- return True
+ elif self.operand2.type is Builtin.set_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
+ return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
@@ -12547,14 +12547,14 @@ class PrimaryCmpNode(ExprNode, CmpNode):
is_memslice_nonecheck = False
def infer_type(self, env):
- type1 = self.operand1.infer_type(env)
- type2 = self.operand2.infer_type(env)
-
- if is_pythran_expr(type1) or is_pythran_expr(type2):
- if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
- return PythranExpr(pythran_binop_type(self.operator, type1, type2))
-
- # TODO: implement this for other types.
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+
+ # TODO: implement this for other types.
return py_object_type
def type_dependencies(self, env):
@@ -12577,14 +12577,14 @@ class PrimaryCmpNode(ExprNode, CmpNode):
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
- type1 = self.operand1.type
- type2 = self.operand2.type
- if is_pythran_expr(type1) or is_pythran_expr(type2):
- if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
- self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
- self.is_pycmp = False
- return self
-
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ self.is_pycmp = False
+ return self
+
if self.analyse_memoryviewslice_comparison(env):
return self
@@ -12724,19 +12724,19 @@ class PrimaryCmpNode(ExprNode, CmpNode):
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
- operand1, operand2 = self.operand1, self.operand2
- if operand1.type.is_complex:
+ operand1, operand2 = self.operand1, self.operand2
+ if operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
- operand1.type.binary_op('=='),
- operand1.result(),
- operand2.result())
+ operand1.type.binary_op('=='),
+ operand1.result(),
+ operand2.result())
elif self.is_c_string_contains():
- if operand2.type is unicode_type:
+ if operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
@@ -12747,18 +12747,18 @@ class PrimaryCmpNode(ExprNode, CmpNode):
return "(%s%s(%s, %s))" % (
negation,
method,
- operand2.result(),
- operand1.result())
- else:
- if is_pythran_expr(self.type):
- result1, result2 = operand1.pythran_result(), operand2.pythran_result()
- else:
- result1, result2 = operand1.result(), operand2.result()
- if self.is_memslice_nonecheck:
- if operand1.type.is_memoryviewslice:
- result1 = "((PyObject *) %s.memview)" % result1
- else:
- result2 = "((PyObject *) %s.memview)" % result2
+ operand2.result(),
+ operand1.result())
+ else:
+ if is_pythran_expr(self.type):
+ result1, result2 = operand1.pythran_result(), operand2.pythran_result()
+ else:
+ result1, result2 = operand1.result(), operand2.result()
+ if self.is_memslice_nonecheck:
+ if operand1.type.is_memoryviewslice:
+ result1 = "((PyObject *) %s.memview)" % result1
+ else:
+ result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
@@ -12979,12 +12979,12 @@ class CoerceToMemViewSliceNode(CoercionNode):
self.type.create_from_py_utility_code(env)
def generate_result_code(self, code):
- code.putln(self.type.from_py_call_code(
- self.arg.py_result(),
- self.result(),
- self.pos,
- code
- ))
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(),
+ self.result(),
+ self.pos,
+ code
+ ))
class CastNode(CoercionNode):
@@ -13043,15 +13043,15 @@ class PyTypeTestNode(CoercionNode):
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
- def reanalyse(self):
- if self.type != self.arg.type or not self.arg.is_temp:
- return self
- if not self.type.typeobj_is_available():
- return self
- if self.arg.may_be_none() and self.notnone:
- return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
- return self.arg
-
+ def reanalyse(self):
+ if self.type != self.arg.type or not self.arg.is_temp:
+ return self
+ if not self.type.typeobj_is_available():
+ return self
+ if self.arg.may_be_none() and self.notnone:
+ return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
+ return self.arg
+
def calculate_constant_result(self):
# FIXME
pass
@@ -13100,7 +13100,7 @@ class NoneCheckNode(CoercionNode):
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
- exception_format_args=()):
+ exception_format_args=()):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
@@ -13136,19 +13136,19 @@ class NoneCheckNode(CoercionNode):
else:
raise Exception("unsupported type")
- @classmethod
- def generate(cls, arg, code, exception_message,
- exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
- node = cls(arg, exception_type_cname, exception_message, exception_format_args)
- node.in_nogil_context = in_nogil_context
- node.put_nonecheck(code)
-
- @classmethod
- def generate_if_needed(cls, arg, code, exception_message,
- exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
- if arg.may_be_none():
- cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
-
+ @classmethod
+ def generate(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ node = cls(arg, exception_type_cname, exception_message, exception_format_args)
+ node.in_nogil_context = in_nogil_context
+ node.put_nonecheck(code)
+
+ @classmethod
+ def generate_if_needed(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ if arg.may_be_none():
+ cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
+
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
@@ -13323,15 +13323,15 @@ class CoerceFromPyTypeNode(CoercionNode):
return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
def generate_result_code(self, code):
- from_py_function = None
- # for certain source types, we can do better than the generic coercion
- if self.type.is_string and self.arg.type is bytes_type:
- if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
- from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
- NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
-
+ from_py_function = None
+ # for certain source types, we can do better than the generic coercion
+ if self.type.is_string and self.arg.type is bytes_type:
+ if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
+ from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
+ NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
+
code.putln(self.type.from_py_call_code(
- self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
+ self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
@@ -13351,7 +13351,7 @@ class CoerceToBooleanNode(CoercionNode):
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
- Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
@@ -13380,9 +13380,9 @@ class CoerceToBooleanNode(CoercionNode):
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
- checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
- checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
- code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
+ checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
+ checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
+ code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
diff --git a/contrib/tools/cython/Cython/Compiler/FusedNode.py b/contrib/tools/cython/Cython/Compiler/FusedNode.py
index 26d6ffd3d6..e86c0d30a7 100644
--- a/contrib/tools/cython/Cython/Compiler/FusedNode.py
+++ b/contrib/tools/cython/Cython/Compiler/FusedNode.py
@@ -6,7 +6,7 @@ from . import (ExprNodes, PyrexTypes, MemoryView,
ParseTreeTransforms, StringEncoding, Errors)
from .ExprNodes import CloneNode, ProxyNode, TupleNode
from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
-from ..Utils import OrderedSet
+from ..Utils import OrderedSet
class FusedCFuncDefNode(StatListNode):
@@ -136,27 +136,27 @@ class FusedCFuncDefNode(StatListNode):
fused_types = self.node.type.get_fused_types()
self.fused_compound_types = fused_types
- new_cfunc_entries = []
+ new_cfunc_entries = []
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
- # Make the types in our CFuncType specific.
+ # Make the types in our CFuncType specific.
type = copied_node.type.specialize(fused_to_specific)
entry = copied_node.entry
- type.specialize_entry(entry, cname)
-
- # Reuse existing Entries (e.g. from .pxd files).
- for i, orig_entry in enumerate(env.cfunc_entries):
- if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
- copied_node.entry = env.cfunc_entries[i]
- if not copied_node.entry.func_cname:
- copied_node.entry.func_cname = entry.func_cname
- entry = copied_node.entry
- type = entry.type
- break
- else:
- new_cfunc_entries.append(entry)
-
+ type.specialize_entry(entry, cname)
+
+ # Reuse existing Entries (e.g. from .pxd files).
+ for i, orig_entry in enumerate(env.cfunc_entries):
+ if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
+ copied_node.entry = env.cfunc_entries[i]
+ if not copied_node.entry.func_cname:
+ copied_node.entry.func_cname = entry.func_cname
+ entry = copied_node.entry
+ type = entry.type
+ break
+ else:
+ new_cfunc_entries.append(entry)
+
copied_node.type = type
entry.type, type.entry = type, entry
@@ -189,14 +189,14 @@ class FusedCFuncDefNode(StatListNode):
if not self.replace_fused_typechecks(copied_node):
break
- # replace old entry with new entries
- try:
- cindex = env.cfunc_entries.index(self.node.entry)
- except ValueError:
- env.cfunc_entries.extend(new_cfunc_entries)
- else:
- env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
-
+ # replace old entry with new entries
+ try:
+ cindex = env.cfunc_entries.index(self.node.entry)
+ except ValueError:
+ env.cfunc_entries.extend(new_cfunc_entries)
+ else:
+ env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
is_def=False)
@@ -225,7 +225,7 @@ class FusedCFuncDefNode(StatListNode):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
- fused types are already in the local scope, and we need the specialized
+ fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
@@ -276,7 +276,7 @@ class FusedCFuncDefNode(StatListNode):
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
- Generate Cython code for instance checks, matching an object to
+ Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
@@ -331,22 +331,22 @@ class FusedCFuncDefNode(StatListNode):
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
- def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
+ def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
- for specialized_type in pythran_types+specialized_buffer_types:
- final_type = specialized_type
- if specialized_type.is_pythran_expr:
- specialized_type = specialized_type.org_buffer
+ for specialized_type in pythran_types+specialized_buffer_types:
+ final_type = specialized_type
+ if specialized_type.is_pythran_expr:
+ specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype
pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype,
- specialized_type_name=final_type.specialization_string)
+ specialized_type_name=final_type.specialization_string)
dtypes = [
(dtype.is_int, pyx_code.dtype_int),
@@ -361,11 +361,11 @@ class FusedCFuncDefNode(StatListNode):
if dtype.is_int:
cond += ' and {{signed_match}}'
- if final_type.is_pythran_expr:
- cond += ' and arg_is_pythran_compatible'
-
+ if final_type.is_pythran_expr:
+ cond += ' and arg_is_pythran_compatible'
+
if codewriter.indenter("if %s:" % cond):
- #codewriter.putln("print 'buffer match found based on numpy dtype'")
+ #codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match)
codewriter.putln("break")
codewriter.dedent()
@@ -390,7 +390,7 @@ class FusedCFuncDefNode(StatListNode):
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
- "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
+ "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
@@ -400,7 +400,7 @@ class FusedCFuncDefNode(StatListNode):
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
- memslice = {{coerce_from_py_func}}(arg, 0)
+ memslice = {{coerce_from_py_func}}(arg, 0)
if memslice.memview:
__PYX_XDEC_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
@@ -410,7 +410,7 @@ class FusedCFuncDefNode(StatListNode):
__pyx_PyErr_Clear()
""" % self.match)
- def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
+ def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
@@ -421,11 +421,11 @@ class FusedCFuncDefNode(StatListNode):
# The first thing to find a match in this loop breaks out of the loop
pyx_code.put_chunk(
u"""
- """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
+ """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
- """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
+ """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
@@ -439,39 +439,39 @@ class FusedCFuncDefNode(StatListNode):
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
- dtype_signed = kind == 'i'
- """)
- pyx_code.indent(2)
- if pythran_types:
- pyx_code.put_chunk(
- u"""
- # Pythran only supports the endianness of the current compiler
- byteorder = dtype.byteorder
- if byteorder == "<" and not __Pyx_Is_Little_Endian():
- arg_is_pythran_compatible = False
- elif byteorder == ">" and __Pyx_Is_Little_Endian():
- arg_is_pythran_compatible = False
- if arg_is_pythran_compatible:
- cur_stride = itemsize
- shape = arg.shape
- strides = arg.strides
- for i in range(arg.ndim-1, -1, -1):
- if (<Py_ssize_t>strides[i]) != cur_stride:
- arg_is_pythran_compatible = False
- break
- cur_stride *= <Py_ssize_t> shape[i]
- else:
- arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
- """)
+ dtype_signed = kind == 'i'
+ """)
+ pyx_code.indent(2)
+ if pythran_types:
+ pyx_code.put_chunk(
+ u"""
+ # Pythran only supports the endianness of the current compiler
+ byteorder = dtype.byteorder
+ if byteorder == "<" and not __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ elif byteorder == ">" and __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ if arg_is_pythran_compatible:
+ cur_stride = itemsize
+ shape = arg.shape
+ strides = arg.strides
+ for i in range(arg.ndim-1, -1, -1):
+ if (<Py_ssize_t>strides[i]) != cur_stride:
+ arg_is_pythran_compatible = False
+ break
+ cur_stride *= <Py_ssize_t> shape[i]
+ else:
+ arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
+ """)
pyx_code.named_insertion_point("numpy_dtype_checks")
- self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
+ self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
for specialized_type in buffer_types:
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
- def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
+ def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
@@ -495,16 +495,16 @@ class FusedCFuncDefNode(StatListNode):
itemsize = -1
""")
- if pythran_types:
- pyx_code.local_variable_declarations.put_chunk(u"""
- cdef bint arg_is_pythran_compatible
- cdef Py_ssize_t cur_stride
- """)
-
+ if pythran_types:
+ pyx_code.local_variable_declarations.put_chunk(u"""
+ cdef bint arg_is_pythran_compatible
+ cdef Py_ssize_t cur_stride
+ """)
+
pyx_code.imports.put_chunk(
u"""
cdef type ndarray
- ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
+ ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
""")
seen_typedefs = set()
@@ -527,7 +527,7 @@ class FusedCFuncDefNode(StatListNode):
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
- {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
+ {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
""")
def _split_fused_types(self, arg):
@@ -540,7 +540,7 @@ class FusedCFuncDefNode(StatListNode):
specialized_types.sort()
seen_py_type_names = set()
- normal_types, buffer_types, pythran_types = [], [], []
+ normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
@@ -552,12 +552,12 @@ class FusedCFuncDefNode(StatListNode):
has_object_fallback = True
else:
normal_types.append(specialized_type)
- elif specialized_type.is_pythran_expr:
- pythran_types.append(specialized_type)
+ elif specialized_type.is_pythran_expr:
+ pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
- return normal_types, buffer_types, pythran_types, has_object_fallback
+ return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code):
pyx_code.put_chunk(
@@ -565,18 +565,18 @@ class FusedCFuncDefNode(StatListNode):
# PROCESSING ARGUMENT {{arg_tuple_idx}}
if {{arg_tuple_idx}} < len(<tuple>args):
arg = (<tuple>args)[{{arg_tuple_idx}}]
- elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
+ elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
arg = (<dict>kwargs)['{{arg.name}}']
else:
{{if arg.default}}
arg = (<tuple>defaults)[{{default_idx}}]
{{else}}
- {{if arg_tuple_idx < min_positional_args}}
- raise TypeError("Expected at least %d argument%s, got %d" % (
- {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
- {{else}}
- raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}")
- {{endif}}
+ {{if arg_tuple_idx < min_positional_args}}
+ raise TypeError("Expected at least %d argument%s, got %d" % (
+ {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
+ {{else}}
+ raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}")
+ {{endif}}
{{endif}}
""")
@@ -596,10 +596,10 @@ class FusedCFuncDefNode(StatListNode):
'memviewslice_cname': MemoryView.memviewslice_cname,
'func_args': self.node.args,
'n_fused': len(fused_types),
- 'min_positional_args':
- self.node.num_required_args - self.node.num_required_kw_args
- if is_def else
- sum(1 for arg in self.node.args if arg.default is None),
+ 'min_positional_args':
+ self.node.num_required_args - self.node.num_required_kw_args
+ if is_def else
+ sum(1 for arg in self.node.args if arg.default is None),
'name': orig_py_func.entry.name,
}
@@ -609,8 +609,8 @@ class FusedCFuncDefNode(StatListNode):
u"""
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
- type __Pyx_ImportNumPyArrayTypeIfAvailable()
- int __Pyx_Is_Little_Endian()
+ type __Pyx_ImportNumPyArrayTypeIfAvailable()
+ int __Pyx_Is_Little_Endian()
""")
decl_code.indent()
@@ -622,22 +622,22 @@ class FusedCFuncDefNode(StatListNode):
dest_sig = [None] * {{n_fused}}
- if kwargs is not None and not kwargs:
- kwargs = None
+ if kwargs is not None and not kwargs:
+ kwargs = None
cdef Py_ssize_t i
# instance check body
""")
-
+
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
- pyx_code.named_insertion_point("func_defs")
+ pyx_code.named_insertion_point("func_defs")
pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0
default_idx = 0
- all_buffer_types = OrderedSet()
+ all_buffer_types = OrderedSet()
seen_fused_types = set()
for i, arg in enumerate(self.node.args):
if arg.type.is_fused:
@@ -657,16 +657,16 @@ class FusedCFuncDefNode(StatListNode):
default_idx=default_idx,
)
- normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
+ normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code)
# 'unrolled' loop, first match breaks out of it
if pyx_code.indenter("while 1:"):
if normal_types:
self._fused_instance_checks(normal_types, pyx_code, env)
- if buffer_types or pythran_types:
- env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
- self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
+ if buffer_types or pythran_types:
+ env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
+ self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
if has_object_fallback:
pyx_code.context.update(specialized_type_name='object')
pyx_code.putln(self.match)
@@ -677,26 +677,26 @@ class FusedCFuncDefNode(StatListNode):
fused_index += 1
all_buffer_types.update(buffer_types)
- all_buffer_types.update(ty.org_buffer for ty in pythran_types)
+ all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default:
default_idx += 1
if all_buffer_types:
- self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
+ self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
- env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
+ env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
pyx_code.put_chunk(
u"""
candidates = []
for sig in <dict>signatures:
match_found = False
- src_sig = sig.strip('()').split('|')
- for i in range(len(dest_sig)):
- dst_type = dest_sig[i]
+ src_sig = sig.strip('()').split('|')
+ for i in range(len(dest_sig)):
+ dst_type = dest_sig[i]
if dst_type is not None:
- if src_sig[i] == dst_type:
+ if src_sig[i] == dst_type:
match_found = True
else:
match_found = False
diff --git a/contrib/tools/cython/Cython/Compiler/Main.py b/contrib/tools/cython/Cython/Compiler/Main.py
index af873843b5..df53e9ba01 100644
--- a/contrib/tools/cython/Cython/Compiler/Main.py
+++ b/contrib/tools/cython/Cython/Compiler/Main.py
@@ -9,8 +9,8 @@ import re
import sys
import io
-if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3):
- sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
+if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3):
+ sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
sys.exit(1)
try:
@@ -23,7 +23,7 @@ except ImportError:
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
-from . import Errors
+from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
@@ -40,7 +40,7 @@ verbose = 0
standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir, 'Includes'))
-
+
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
@@ -55,7 +55,7 @@ class CompilationData(object):
# result CompilationResult
pass
-
+
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
@@ -226,8 +226,8 @@ class Context(object):
rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
if not pxd_pathname.endswith(rel_path):
rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
- if Options.source_root:
- rel_path = os.path.relpath(pxd_pathname, Options.source_root)
+ if Options.source_root:
+ rel_path = os.path.relpath(pxd_pathname, Options.source_root)
source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
err, result = self.process_pxd(source_desc, scope, qualified_name)
if err:
@@ -250,7 +250,7 @@ class Context(object):
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
- qualified_name in ('stdlib', 'stdio', 'stl')):
+ qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
@@ -372,7 +372,7 @@ class Context(object):
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
- "Formal grammar can only be used with compiled Cython with an available pgen.")
+ "Formal grammar can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
except UnicodeDecodeError as e:
#import traceback
@@ -442,7 +442,7 @@ class Context(object):
pass
result.c_file = None
-
+
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = ".cpp"
@@ -458,7 +458,7 @@ def get_output_filename(source_filename, cwd, options):
else:
return suggested_file_name
-
+
def create_default_resultobj(compilation_source, options):
result = CompilationResult()
result.main_source_file = compilation_source.source_desc.filename
@@ -469,7 +469,7 @@ def create_default_resultobj(compilation_source, options):
result.embedded_metadata = options.embedded_metadata
return result
-
+
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
@@ -491,8 +491,8 @@ def run_pipeline(source, options, full_module_name=None, context=None):
rel_path = source # safety measure to prevent printing incorrect paths
else:
rel_path = abs_path
- if Options.source_root:
- rel_path = os.path.relpath(abs_path, Options.source_root)
+ if Options.source_root:
+ rel_path = os.path.relpath(abs_path, Options.source_root)
source_desc = FileSourceDescriptor(abs_path, rel_path)
source = CompilationSource(source_desc, full_module_name, cwd)
@@ -519,15 +519,15 @@ def run_pipeline(source, options, full_module_name=None, context=None):
return result
-# ------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Main Python entry points
#
-# ------------------------------------------------------------------------
+# ------------------------------------------------------------------------
class CompilationSource(object):
"""
- Contains the data necessary to start up a compilation pipeline for
+ Contains the data necessary to start up a compilation pipeline for
a single compilation unit.
"""
def __init__(self, source_desc, full_module_name, cwd):
@@ -535,7 +535,7 @@ class CompilationSource(object):
self.full_module_name = full_module_name
self.cwd = cwd
-
+
class CompilationOptions(object):
r"""
See default_options at the end of this module for a list of all possible
@@ -562,22 +562,22 @@ class CompilationOptions(object):
message = "got unknown compilation option%s, please remove: %s" % (
's' if len(unknown_options) > 1 else '',
', '.join(unknown_options))
- raise ValueError(message)
+ raise ValueError(message)
directive_defaults = Options.get_directive_defaults()
directives = dict(options['compiler_directives']) # copy mutable field
- # check for invalid directives
+ # check for invalid directives
unknown_directives = set(directives) - set(directive_defaults)
- if unknown_directives:
- message = "got unknown compiler directive%s: %s" % (
- 's' if len(unknown_directives) > 1 else '',
- ', '.join(unknown_directives))
- raise ValueError(message)
+ if unknown_directives:
+ message = "got unknown compiler directive%s: %s" % (
+ 's' if len(unknown_directives) > 1 else '',
+ ', '.join(unknown_directives))
+ raise ValueError(message)
options['compiler_directives'] = directives
- if directives.get('np_pythran', False) and not options['cplus']:
- import warnings
- warnings.warn("C++ mode forced when in Pythran mode!")
- options['cplus'] = True
+ if directives.get('np_pythran', False) and not options['cplus']:
+ import warnings
+ warnings.warn("C++ mode forced when in Pythran mode!")
+ options['cplus'] = True
if 'language_level' in directives and 'language_level' not in kw:
options['language_level'] = directives['language_level']
elif not options.get('language_level'):
@@ -764,14 +764,14 @@ def compile_multiple(sources, options):
processed.add(source)
return results
-
+
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
Compile one or more Pyrex implementation files, with optional timestamp
- checking and recursing on dependencies. The source argument may be a string
- or a sequence of strings. If it is a string and no recursion or timestamp
+ checking and recursing on dependencies. The source argument may be a string
+ or a sequence of strings. If it is a string and no recursion or timestamp
checking is requested, a CompilationResult is returned, otherwise a
CompilationResultSet is returned.
"""
@@ -781,7 +781,7 @@ def compile(source, options = None, full_module_name = None, **kwds):
else:
return compile_multiple(source, options)
-
+
@Utils.cached_function
def search_include_directories(dirs, qualified_name, suffix, pos, include=False):
"""
@@ -847,16 +847,16 @@ def search_include_directories(dirs, qualified_name, suffix, pos, include=False)
return None
-# ------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Main command-line entry point
#
-# ------------------------------------------------------------------------
-
+# ------------------------------------------------------------------------
+
def setuptools_main():
return main(command_line = 1)
-
+
def main(command_line = 0):
args = sys.argv[1:]
any_failures = 0
@@ -882,11 +882,11 @@ def main(command_line = 0):
sys.exit(1)
-# ------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
-# ------------------------------------------------------------------------
+# ------------------------------------------------------------------------
default_options = dict(
show_version = 0,
@@ -918,6 +918,6 @@ default_options = dict(
output_dir=None,
build_dir=None,
cache=None,
- create_extension=None,
- np_pythran=False
+ create_extension=None,
+ np_pythran=False
)
diff --git a/contrib/tools/cython/Cython/Compiler/MemoryView.py b/contrib/tools/cython/Cython/Compiler/MemoryView.py
index 0406d6c716..29d27432ea 100644
--- a/contrib/tools/cython/Cython/Compiler/MemoryView.py
+++ b/contrib/tools/cython/Cython/Compiler/MemoryView.py
@@ -28,12 +28,12 @@ def concat_flags(*flags):
format_flag = "PyBUF_FORMAT"
-memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"
-memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"
-memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)"
-memview_full_access = "PyBUF_FULL_RO"
-#memview_strided_access = "PyBUF_STRIDED_RO"
-memview_strided_access = "PyBUF_RECORDS_RO"
+memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"
+memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"
+memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)"
+memview_full_access = "PyBUF_FULL_RO"
+#memview_strided_access = "PyBUF_STRIDED_RO"
+memview_strided_access = "PyBUF_RECORDS_RO"
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
@@ -392,15 +392,15 @@ def get_memoryview_flag(access, packing):
return 'contiguous'
-def get_is_contig_func_name(contig_type, ndim):
- assert contig_type in ('C', 'F')
- return "__pyx_memviewslice_is_contig_%s%d" % (contig_type, ndim)
+def get_is_contig_func_name(contig_type, ndim):
+ assert contig_type in ('C', 'F')
+ return "__pyx_memviewslice_is_contig_%s%d" % (contig_type, ndim)
-def get_is_contig_utility(contig_type, ndim):
- assert contig_type in ('C', 'F')
- C = dict(context, ndim=ndim, contig_type=contig_type)
- utility = load_memview_c_utility("MemviewSliceCheckContig", C, requires=[is_contig_utility])
+def get_is_contig_utility(contig_type, ndim):
+ assert contig_type in ('C', 'F')
+ C = dict(context, ndim=ndim, contig_type=contig_type)
+ utility = load_memview_c_utility("MemviewSliceCheckContig", C, requires=[is_contig_utility])
return utility
@@ -816,7 +816,7 @@ memviewslice_declare_code = load_memview_c_utility(
context=context,
requires=[])
-atomic_utility = load_memview_c_utility("Atomics", context)
+atomic_utility = load_memview_c_utility("Atomics", context)
memviewslice_init_code = load_memview_c_utility(
"MemviewSliceInit",
@@ -843,7 +843,7 @@ view_utility_code = load_memview_cy_utility(
context=context,
requires=[Buffer.GetAndReleaseBufferUtilityCode(),
Buffer.buffer_struct_declare_code,
- Buffer.buffer_formats_declare_code,
+ Buffer.buffer_formats_declare_code,
memviewslice_init_code,
is_contig_utility,
overlapping_utility,
diff --git a/contrib/tools/cython/Cython/Compiler/ModuleNode.py b/contrib/tools/cython/Cython/Compiler/ModuleNode.py
index cd7166408e..304f6f9a85 100644
--- a/contrib/tools/cython/Cython/Compiler/ModuleNode.py
+++ b/contrib/tools/cython/Cython/Compiler/ModuleNode.py
@@ -7,13 +7,13 @@ from __future__ import absolute_import
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
- EncodedString=object, re=object)
+ EncodedString=object, re=object)
from collections import defaultdict
import json
import operator
import os
-import re
+import re
from .PyrexTypes import CPtrType
from . import Future
@@ -24,14 +24,14 @@ from . import Nodes
from . import Options
from . import TypeSlots
from . import PyrexTypes
-from . import Pythran
+from . import Pythran
from .Errors import error, warning
from .PyrexTypes import py_object_type
from ..Utils import open_new_file, replace_suffix, decode_filename, build_hex_version
-from .Code import UtilityCode, IncludeCode
+from .Code import UtilityCode, IncludeCode
from .StringEncoding import EncodedString
-from .Pythran import has_np_pythran
+from .Pythran import has_np_pythran
def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd()
@@ -87,9 +87,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.utility_code_list.extend(scope.utility_code_list)
- for inc in scope.c_includes.values():
- self.scope.process_include(inc)
-
+ for inc in scope.c_includes.values():
+ self.scope.process_include(inc)
+
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
@@ -106,8 +106,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.merge_in(scope)
def analyse_declarations(self, env):
- if has_np_pythran(env):
- Pythran.include_pythran_generic(env)
+ if has_np_pythran(env):
+ Pythran.include_pythran_generic(env)
if self.directives:
env.old_style_globals = self.directives['old_style_globals']
if not Options.docstrings:
@@ -120,7 +120,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else:
env.doc = self.doc
env.directives = self.directives
-
+
self.body.analyse_declarations(env)
def prepare_utility_code(self):
@@ -204,14 +204,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
h_code.putln("")
- h_code.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */")
- h_code.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */")
- h_code.putln("")
+ h_code.putln("/* WARNING: the interface of the module init function changed in CPython 3.5. */")
+ h_code.putln("/* It now returns a PyModuleDef instance instead of a PyModule instance. */")
+ h_code.putln("")
h_code.putln("#if PY_MAJOR_VERSION < 3")
init_name = 'init' + (options.init_suffix or env.module_name)
h_code.putln("PyMODINIT_FUNC %s(void);" % init_name)
h_code.putln("#else")
- h_code.putln("PyMODINIT_FUNC %s(void);" % self.mod_init_func_cname('PyInit', env, options))
+ h_code.putln("PyMODINIT_FUNC %s(void);" % self.mod_init_func_cname('PyInit', env, options))
h_code.putln("#endif")
h_code.putln("")
h_code.putln("#endif /* !%s */" % h_guard)
@@ -225,7 +225,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
- entry.type.declaration_code(entry.cname)))
+ entry.type.declaration_code(entry.cname)))
if i_code:
i_code.putln("cdef extern %s" % (
entry.type.declaration_code(entry.cname, pyrex=1)))
@@ -366,16 +366,16 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code = globalstate['before_global_var']
code.putln('#define __Pyx_MODULE_NAME "%s"' % self.full_module_name)
- module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
- code.putln("extern int %s;" % module_is_main)
- code.putln("int %s = 0;" % module_is_main)
+ module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
+ code.putln("extern int %s;" % module_is_main)
+ code.putln("int %s = 0;" % module_is_main)
code.putln("")
code.putln("/* Implementation of '%s' */" % env.qualified_name)
- code = globalstate['late_includes']
- code.putln("/* Late includes */")
- self.generate_includes(env, modules, code, early=False)
-
+ code = globalstate['late_includes']
+ code.putln("/* Late includes */")
+ self.generate_includes(env, modules, code, early=False)
+
code = globalstate['all_the_rest']
self.generate_cached_builtins_decls(env, code)
@@ -640,30 +640,30 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("#define PY_SSIZE_T_CLEAN")
code.putln("#endif /* PY_SSIZE_T_CLEAN */")
- for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
- if inc.location == inc.INITIAL:
- inc.write(code)
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.INITIAL:
+ inc.write(code)
code.putln("#ifndef Py_PYTHON_H")
code.putln(" #error Python headers needed to compile C extensions, "
"please install development version of Python.")
code.putln("#elif PY_VERSION_HEX < 0x02060000 || "
- "(0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)")
- code.putln(" #error Cython requires Python 2.6+ or Python 3.3+.")
+ "(0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)")
+ code.putln(" #error Cython requires Python 2.6+ or Python 3.3+.")
code.putln("#else")
code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
from .. import __version__
code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_'))
code.putln('#define CYTHON_HEX_VERSION %s' % build_hex_version(__version__))
- code.putln("#define CYTHON_FUTURE_DIVISION %d" % (
- Future.division in env.context.future_directives))
+ code.putln("#define CYTHON_FUTURE_DIVISION %d" % (
+ Future.division in env.context.future_directives))
self._put_setup_code(code, "CModulePreamble")
if env.context.options.cplus:
self._put_setup_code(code, "CppInitCode")
else:
self._put_setup_code(code, "CInitCode")
- self._put_setup_code(code, "PythonCompatibility")
+ self._put_setup_code(code, "PythonCompatibility")
self._put_setup_code(code, "MathInitCode")
# Using "(void)cname" to prevent "unused" warnings.
@@ -686,10 +686,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
- code.putln("/* Early includes */")
- self.generate_includes(env, cimported_modules, code, late=False)
+ code.putln("/* Early includes */")
+ self.generate_includes(env, cimported_modules, code, late=False)
code.putln("")
- code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
+ code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
code.putln("#endif")
code.putln("")
@@ -729,9 +729,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
PyrexTypes.c_int_type.create_from_py_utility_code(env)
code.put(Nodes.branch_prediction_macros)
- code.putln('static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }')
+ code.putln('static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }')
code.putln('')
- code.putln('static PyObject *%s = NULL;' % env.module_cname)
+ code.putln('static PyObject *%s = NULL;' % env.module_cname)
code.putln('static PyObject *%s;' % env.module_dict_cname)
code.putln('static PyObject *%s;' % Naming.builtins_cname)
code.putln('static PyObject *%s = NULL;' % Naming.cython_runtime_cname)
@@ -745,10 +745,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
code.putln('static const char *%s;' % Naming.filename_cname)
- env.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
- if has_np_pythran(env):
- env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
-
+ env.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ if has_np_pythran(env):
+ env.use_utility_code(UtilityCode.load_cached("PythranConversion", "CppSupport.cpp"))
+
def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro
code.putln("#ifndef %s" % name)
@@ -764,31 +764,31 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(" #define DL_IMPORT(_T) _T")
code.putln("#endif")
- def generate_includes(self, env, cimported_modules, code, early=True, late=True):
+ def generate_includes(self, env, cimported_modules, code, early=True, late=True):
includes = []
- for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
- if inc.location == inc.EARLY:
- if early:
- inc.write(code)
- elif inc.location == inc.LATE:
- if late:
- inc.write(code)
- if early:
- code.putln_openmp("#include <omp.h>")
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.EARLY:
+ if early:
+ inc.write(code)
+ elif inc.location == inc.LATE:
+ if late:
+ inc.write(code)
+ if early:
+ code.putln_openmp("#include <omp.h>")
def generate_filename_table(self, code):
- from os.path import isabs, basename
+ from os.path import isabs, basename
code.putln("")
code.putln("static const char *%s[] = {" % Naming.filetable_cname)
if code.globalstate.filename_list:
for source_desc in code.globalstate.filename_list:
- file_path = source_desc.get_filenametable_entry()
- if Options.source_root:
- # If source root specified, dump description - it's source root relative filename
- file_path = source_desc.get_description()
- if isabs(file_path):
- file_path = basename(file_path) # never include absolute paths
- escaped_filename = file_path.replace("\\", "\\\\").replace('"', r'\"')
+ file_path = source_desc.get_filenametable_entry()
+ if Options.source_root:
+ # If source root specified, dump description - it's source root relative filename
+ file_path = source_desc.get_description()
+ if isabs(file_path):
+ file_path = basename(file_path) # never include absolute paths
+ escaped_filename = file_path.replace("\\", "\\\\").replace('"', r'\"')
code.putln('"%s",' % escaped_filename)
else:
# Some C compilers don't like an empty array
@@ -926,96 +926,96 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
[base_class.empty_declaration_code() for base_class in type.base_classes])
code.put(" : public %s" % base_class_decl)
code.putln(" {")
- py_attrs = [e for e in scope.entries.values()
- if e.type.is_pyobject and not e.is_inherited]
+ py_attrs = [e for e in scope.entries.values()
+ if e.type.is_pyobject and not e.is_inherited]
has_virtual_methods = False
- constructor = None
- destructor = None
+ constructor = None
+ destructor = None
for attr in scope.var_entries:
if attr.type.is_cfunction:
code.put("inline ")
if attr.type.is_cfunction and attr.type.is_static_method:
code.put("static ")
- elif attr.name == "<init>":
- constructor = attr
- elif attr.name == "<del>":
- destructor = attr
- elif attr.type.is_cfunction:
+ elif attr.name == "<init>":
+ constructor = attr
+ elif attr.name == "<del>":
+ destructor = attr
+ elif attr.type.is_cfunction:
code.put("virtual ")
has_virtual_methods = True
code.putln("%s;" % attr.type.declaration_code(attr.cname))
- is_implementing = 'init_module' in code.globalstate.parts
- if constructor or py_attrs:
- if constructor:
- arg_decls = []
- arg_names = []
- for arg in constructor.type.original_args[
- :len(constructor.type.args)-constructor.type.optional_arg_count]:
- arg_decls.append(arg.declaration_code())
- arg_names.append(arg.cname)
- if constructor.type.optional_arg_count:
- arg_decls.append(constructor.type.op_arg_struct.declaration_code(Naming.optional_args_cname))
- arg_names.append(Naming.optional_args_cname)
- if not arg_decls:
- arg_decls = ["void"]
- else:
- arg_decls = ["void"]
- arg_names = []
- if is_implementing:
- code.putln("%s(%s) {" % (type.cname, ", ".join(arg_decls)))
- if py_attrs:
- code.put_ensure_gil()
- for attr in py_attrs:
- code.put_init_var_to_py_none(attr, nanny=False);
- if constructor:
- code.putln("%s(%s);" % (constructor.cname, ", ".join(arg_names)))
- if py_attrs:
- code.put_release_ensured_gil()
- code.putln("}")
- else:
- code.putln("%s(%s);" % (type.cname, ", ".join(arg_decls)))
- if destructor or py_attrs or has_virtual_methods:
- if has_virtual_methods:
- code.put("virtual ")
- if is_implementing:
- code.putln("~%s() {" % type.cname)
- if py_attrs:
- code.put_ensure_gil()
- if destructor:
- code.putln("%s();" % destructor.cname)
- if py_attrs:
- for attr in py_attrs:
- code.put_var_xdecref(attr, nanny=False);
- code.put_release_ensured_gil()
- code.putln("}")
- else:
- code.putln("~%s();" % type.cname)
- if py_attrs:
- # Also need copy constructor and assignment operators.
- if is_implementing:
- code.putln("%s(const %s& __Pyx_other) {" % (type.cname, type.cname))
- code.put_ensure_gil()
- for attr in scope.var_entries:
- if not attr.type.is_cfunction:
- code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
- code.put_var_incref(attr, nanny=False)
- code.put_release_ensured_gil()
- code.putln("}")
- code.putln("%s& operator=(const %s& __Pyx_other) {" % (type.cname, type.cname))
- code.putln("if (this != &__Pyx_other) {")
- code.put_ensure_gil()
- for attr in scope.var_entries:
- if not attr.type.is_cfunction:
- code.put_var_xdecref(attr, nanny=False);
- code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
- code.put_var_incref(attr, nanny=False)
- code.put_release_ensured_gil()
- code.putln("}")
- code.putln("return *this;")
- code.putln("}")
- else:
- code.putln("%s(const %s& __Pyx_other);" % (type.cname, type.cname))
- code.putln("%s& operator=(const %s& __Pyx_other);" % (type.cname, type.cname))
+ is_implementing = 'init_module' in code.globalstate.parts
+ if constructor or py_attrs:
+ if constructor:
+ arg_decls = []
+ arg_names = []
+ for arg in constructor.type.original_args[
+ :len(constructor.type.args)-constructor.type.optional_arg_count]:
+ arg_decls.append(arg.declaration_code())
+ arg_names.append(arg.cname)
+ if constructor.type.optional_arg_count:
+ arg_decls.append(constructor.type.op_arg_struct.declaration_code(Naming.optional_args_cname))
+ arg_names.append(Naming.optional_args_cname)
+ if not arg_decls:
+ arg_decls = ["void"]
+ else:
+ arg_decls = ["void"]
+ arg_names = []
+ if is_implementing:
+ code.putln("%s(%s) {" % (type.cname, ", ".join(arg_decls)))
+ if py_attrs:
+ code.put_ensure_gil()
+ for attr in py_attrs:
+ code.put_init_var_to_py_none(attr, nanny=False);
+ if constructor:
+ code.putln("%s(%s);" % (constructor.cname, ", ".join(arg_names)))
+ if py_attrs:
+ code.put_release_ensured_gil()
+ code.putln("}")
+ else:
+ code.putln("%s(%s);" % (type.cname, ", ".join(arg_decls)))
+ if destructor or py_attrs or has_virtual_methods:
+ if has_virtual_methods:
+ code.put("virtual ")
+ if is_implementing:
+ code.putln("~%s() {" % type.cname)
+ if py_attrs:
+ code.put_ensure_gil()
+ if destructor:
+ code.putln("%s();" % destructor.cname)
+ if py_attrs:
+ for attr in py_attrs:
+ code.put_var_xdecref(attr, nanny=False);
+ code.put_release_ensured_gil()
+ code.putln("}")
+ else:
+ code.putln("~%s();" % type.cname)
+ if py_attrs:
+ # Also need copy constructor and assignment operators.
+ if is_implementing:
+ code.putln("%s(const %s& __Pyx_other) {" % (type.cname, type.cname))
+ code.put_ensure_gil()
+ for attr in scope.var_entries:
+ if not attr.type.is_cfunction:
+ code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
+ code.put_var_incref(attr, nanny=False)
+ code.put_release_ensured_gil()
+ code.putln("}")
+ code.putln("%s& operator=(const %s& __Pyx_other) {" % (type.cname, type.cname))
+ code.putln("if (this != &__Pyx_other) {")
+ code.put_ensure_gil()
+ for attr in scope.var_entries:
+ if not attr.type.is_cfunction:
+ code.put_var_xdecref(attr, nanny=False);
+ code.putln("%s = __Pyx_other.%s;" % (attr.cname, attr.cname))
+ code.put_var_incref(attr, nanny=False)
+ code.put_release_ensured_gil()
+ code.putln("}")
+ code.putln("return *this;")
+ code.putln("}")
+ else:
+ code.putln("%s(const %s& __Pyx_other);" % (type.cname, type.cname))
+ code.putln("%s& operator=(const %s& __Pyx_other);" % (type.cname, type.cname))
code.putln("};")
def generate_enum_definition(self, entry, code):
@@ -1247,31 +1247,31 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.generate_traverse_function(scope, code, entry)
if scope.needs_tp_clear():
self.generate_clear_function(scope, code, entry)
- if scope.defines_any_special(["__getitem__"]):
+ if scope.defines_any_special(["__getitem__"]):
self.generate_getitem_int_function(scope, code)
- if scope.defines_any_special(["__setitem__", "__delitem__"]):
+ if scope.defines_any_special(["__setitem__", "__delitem__"]):
self.generate_ass_subscript_function(scope, code)
- if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]):
+ if scope.defines_any_special(["__getslice__", "__setslice__", "__delslice__"]):
warning(self.pos,
"__getslice__, __setslice__, and __delslice__ are not supported by Python 3, "
"use __getitem__, __setitem__, and __delitem__ instead", 1)
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.")
code.putln("#endif")
- if scope.defines_any_special(["__setslice__", "__delslice__"]):
+ if scope.defines_any_special(["__setslice__", "__delslice__"]):
self.generate_ass_slice_function(scope, code)
- if scope.defines_any_special(["__getattr__", "__getattribute__"]):
+ if scope.defines_any_special(["__getattr__", "__getattribute__"]):
self.generate_getattro_function(scope, code)
- if scope.defines_any_special(["__setattr__", "__delattr__"]):
+ if scope.defines_any_special(["__setattr__", "__delattr__"]):
self.generate_setattro_function(scope, code)
- if scope.defines_any_special(["__get__"]):
+ if scope.defines_any_special(["__get__"]):
self.generate_descr_get_function(scope, code)
- if scope.defines_any_special(["__set__", "__delete__"]):
+ if scope.defines_any_special(["__set__", "__delete__"]):
self.generate_descr_set_function(scope, code)
- if not scope.is_closure_class_scope and scope.defines_any(["__dict__"]):
+ if not scope.is_closure_class_scope and scope.defines_any(["__dict__"]):
self.generate_dict_getter_function(scope, code)
- if scope.defines_any_special(TypeSlots.richcmp_special_methods):
- self.generate_richcmp_function(scope, code)
+ if scope.defines_any_special(TypeSlots.richcmp_special_methods):
+ self.generate_richcmp_function(scope, code)
self.generate_property_accessors(scope, code)
self.generate_method_table(scope, code)
self.generate_getset_table(scope, code)
@@ -1450,11 +1450,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
is_final_type = scope.parent_type.is_final_type
needs_gc = scope.needs_gc()
- weakref_slot = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ weakref_slot = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
if weakref_slot not in scope.var_entries:
weakref_slot = None
- dict_slot = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
+ dict_slot = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
if dict_slot not in scope.var_entries:
dict_slot = None
@@ -1467,15 +1467,15 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
if not is_final_type:
# in Py3.4+, call tp_finalize() as early as possible
- code.putln("#if CYTHON_USE_TP_FINALIZE")
+ code.putln("#if CYTHON_USE_TP_FINALIZE")
if needs_gc:
finalised_check = '!_PyGC_FINALIZED(o)'
else:
finalised_check = (
'(!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))')
- code.putln(
- "if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)"
- " && Py_TYPE(o)->tp_finalize) && %s) {" % finalised_check)
+ code.putln(
+ "if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)"
+ " && Py_TYPE(o)->tp_finalize) && %s) {" % finalised_check)
# if instance was resurrected by finaliser, return
code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;")
code.putln("}")
@@ -1622,9 +1622,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
for entry in py_attrs:
var_code = "p->%s" % entry.cname
- var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code)
+ var_as_pyobject = PyrexTypes.typecast(py_object_type, entry.type, var_code)
code.putln("if (%s) {" % var_code)
- code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject)
+ code.putln("e = (*v)(%s, a); if (e) return e;" % var_as_pyobject)
code.putln("}")
# Traverse buffer exporting objects.
@@ -1841,76 +1841,76 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(
"}")
- def generate_richcmp_function(self, scope, code):
- if scope.lookup_here("__richcmp__"):
- # user implemented, nothing to do
- return
- # otherwise, we have to generate it from the Python special methods
- richcmp_cfunc = scope.mangle_internal("tp_richcompare")
- code.putln("")
- code.putln("static PyObject *%s(PyObject *o1, PyObject *o2, int op) {" % richcmp_cfunc)
- code.putln("switch (op) {")
-
- class_scopes = []
- cls = scope.parent_type
- while cls is not None and not cls.entry.visibility == 'extern':
- class_scopes.append(cls.scope)
- cls = cls.scope.parent_type.base_type
- assert scope in class_scopes
-
- extern_parent = None
- if cls and cls.entry.visibility == 'extern':
- # need to call up into base classes as we may not know all implemented comparison methods
- extern_parent = cls if cls.typeptr_cname else scope.parent_type.base_type
-
- eq_entry = None
- has_ne = False
- for cmp_method in TypeSlots.richcmp_special_methods:
- for class_scope in class_scopes:
- entry = class_scope.lookup_here(cmp_method)
- if entry is not None:
- break
- else:
- continue
-
- cmp_type = cmp_method.strip('_').upper() # e.g. "__eq__" -> EQ
- code.putln("case Py_%s: {" % cmp_type)
- if cmp_method == '__eq__':
- eq_entry = entry
- # Python itself does not do this optimisation, it seems...
- #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_True);")
- elif cmp_method == '__ne__':
- has_ne = True
- # Python itself does not do this optimisation, it seems...
- #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
- code.putln("return %s(o1, o2);" % entry.func_cname)
- code.putln("}")
-
- if eq_entry and not has_ne and not extern_parent:
- code.putln("case Py_NE: {")
- code.putln("PyObject *ret;")
- # Python itself does not do this optimisation, it seems...
- #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
- code.putln("ret = %s(o1, o2);" % eq_entry.func_cname)
- code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
- code.putln("int b = __Pyx_PyObject_IsTrue(ret); Py_DECREF(ret);")
- code.putln("if (unlikely(b < 0)) return NULL;")
- code.putln("ret = (b) ? Py_False : Py_True;")
- code.putln("Py_INCREF(ret);")
- code.putln("}")
- code.putln("return ret;")
- code.putln("}")
-
- code.putln("default: {")
- if extern_parent and extern_parent.typeptr_cname:
- code.putln("if (likely(%s->tp_richcompare)) return %s->tp_richcompare(o1, o2, op);" % (
- extern_parent.typeptr_cname, extern_parent.typeptr_cname))
- code.putln("return __Pyx_NewRef(Py_NotImplemented);")
- code.putln("}")
-
- code.putln("}") # switch
- code.putln("}")
-
+ def generate_richcmp_function(self, scope, code):
+ if scope.lookup_here("__richcmp__"):
+ # user implemented, nothing to do
+ return
+ # otherwise, we have to generate it from the Python special methods
+ richcmp_cfunc = scope.mangle_internal("tp_richcompare")
+ code.putln("")
+ code.putln("static PyObject *%s(PyObject *o1, PyObject *o2, int op) {" % richcmp_cfunc)
+ code.putln("switch (op) {")
+
+ class_scopes = []
+ cls = scope.parent_type
+ while cls is not None and not cls.entry.visibility == 'extern':
+ class_scopes.append(cls.scope)
+ cls = cls.scope.parent_type.base_type
+ assert scope in class_scopes
+
+ extern_parent = None
+ if cls and cls.entry.visibility == 'extern':
+ # need to call up into base classes as we may not know all implemented comparison methods
+ extern_parent = cls if cls.typeptr_cname else scope.parent_type.base_type
+
+ eq_entry = None
+ has_ne = False
+ for cmp_method in TypeSlots.richcmp_special_methods:
+ for class_scope in class_scopes:
+ entry = class_scope.lookup_here(cmp_method)
+ if entry is not None:
+ break
+ else:
+ continue
+
+ cmp_type = cmp_method.strip('_').upper() # e.g. "__eq__" -> EQ
+ code.putln("case Py_%s: {" % cmp_type)
+ if cmp_method == '__eq__':
+ eq_entry = entry
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_True);")
+ elif cmp_method == '__ne__':
+ has_ne = True
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
+ code.putln("return %s(o1, o2);" % entry.func_cname)
+ code.putln("}")
+
+ if eq_entry and not has_ne and not extern_parent:
+ code.putln("case Py_NE: {")
+ code.putln("PyObject *ret;")
+ # Python itself does not do this optimisation, it seems...
+ #code.putln("if (o1 == o2) return __Pyx_NewRef(Py_False);")
+ code.putln("ret = %s(o1, o2);" % eq_entry.func_cname)
+ code.putln("if (likely(ret && ret != Py_NotImplemented)) {")
+ code.putln("int b = __Pyx_PyObject_IsTrue(ret); Py_DECREF(ret);")
+ code.putln("if (unlikely(b < 0)) return NULL;")
+ code.putln("ret = (b) ? Py_False : Py_True;")
+ code.putln("Py_INCREF(ret);")
+ code.putln("}")
+ code.putln("return ret;")
+ code.putln("}")
+
+ code.putln("default: {")
+ if extern_parent and extern_parent.typeptr_cname:
+ code.putln("if (likely(%s->tp_richcompare)) return %s->tp_richcompare(o1, o2, op);" % (
+ extern_parent.typeptr_cname, extern_parent.typeptr_cname))
+ code.putln("return __Pyx_NewRef(Py_NotImplemented);")
+ code.putln("}")
+
+ code.putln("}") # switch
+ code.putln("}")
+
def generate_getattro_function(self, scope, code):
# First try to get the attribute using __getattribute__, if defined, or
# PyObject_GenericGetAttr.
@@ -1918,19 +1918,19 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
# If that raises an AttributeError, call the __getattr__ if defined.
#
# In both cases, defined can be in this class, or any base class.
- def lookup_here_or_base(n, tp=None, extern_return=None):
+ def lookup_here_or_base(n, tp=None, extern_return=None):
# Recursive lookup
- if tp is None:
- tp = scope.parent_type
- r = tp.scope.lookup_here(n)
- if r is None:
- if tp.is_external and extern_return is not None:
- return extern_return
- if tp.base_type is not None:
- return lookup_here_or_base(n, tp.base_type)
- return r
-
- has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
+ if tp is None:
+ tp = scope.parent_type
+ r = tp.scope.lookup_here(n)
+ if r is None:
+ if tp.is_external and extern_return is not None:
+ return extern_return
+ if tp.base_type is not None:
+ return lookup_here_or_base(n, tp.base_type)
+ return r
+
+ has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.putln("")
@@ -1942,20 +1942,20 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"PyObject *v = %s(o, n);" % (
getattribute_entry.func_cname))
else:
- if not has_instance_dict and scope.parent_type.is_final_type:
- # Final with no dict => use faster type attribute lookup.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
- generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
- elif not has_instance_dict or has_instance_dict == "extern":
- # No dict in the known ancestors, but don't know about extern ancestors or subtypes.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
- generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
- else:
- generic_getattr_cfunc = "PyObject_GenericGetAttr"
+ if not has_instance_dict and scope.parent_type.is_final_type:
+ # Final with no dict => use faster type attribute lookup.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
+ elif not has_instance_dict or has_instance_dict == "extern":
+ # No dict in the known ancestors, but don't know about extern ancestors or subtypes.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ else:
+ generic_getattr_cfunc = "PyObject_GenericGetAttr"
code.putln(
- "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
+ "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
@@ -2311,47 +2311,47 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("return -1;")
code.putln("}")
code.putln("")
- code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
+ code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
code.exit_cfunc_scope() # done with labels
def generate_module_init_func(self, imported_modules, env, options, code):
subfunction = self.mod_init_subfunction(self.pos, self.scope, code)
-
+
code.enter_cfunc_scope(self.scope)
code.putln("")
- code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
+ code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
init_name = 'init' + (options.init_suffix or env.module_name)
- header2 = "__Pyx_PyMODINIT_FUNC %s(void)" % init_name
- header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env, options)
+ header2 = "__Pyx_PyMODINIT_FUNC %s(void)" % init_name
+ header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env, options)
code.putln("#if PY_MAJOR_VERSION < 3")
- # Optimise for small code size as the module init function is only executed once.
- code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
+ # Optimise for small code size as the module init function is only executed once.
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
code.putln(header2)
code.putln("#else")
- code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
code.putln(header3)
-
- # CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+
+ # CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
code.putln("{")
- code.putln("return PyModuleDef_Init(&%s);" % Naming.pymoduledef_cname)
- code.putln("}")
-
- mod_create_func = UtilityCode.load_as_string("ModuleCreationPEP489", "ModuleSetupCode.c")[1]
- code.put(mod_create_func)
-
- code.putln("")
- # main module init code lives in Py_mod_exec function, not in PyInit function
+ code.putln("return PyModuleDef_Init(&%s);" % Naming.pymoduledef_cname)
+ code.putln("}")
+
+ mod_create_func = UtilityCode.load_as_string("ModuleCreationPEP489", "ModuleSetupCode.c")[1]
+ code.put(mod_create_func)
+
+ code.putln("")
+ # main module init code lives in Py_mod_exec function, not in PyInit function
code.putln("static CYTHON_SMALL_CODE int %s(PyObject *%s)" % (
- self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env),
- Naming.pymodinit_module_arg))
- code.putln("#endif") # PEP489
-
- code.putln("#endif") # Py3
-
- # start of module init/exec function (pre/post PEP 489)
- code.putln("{")
-
+ self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env),
+ Naming.pymodinit_module_arg))
+ code.putln("#endif") # PEP489
+
+ code.putln("#endif") # Py3
+
+ # start of module init/exec function (pre/post PEP 489)
+ code.putln("{")
+
tempdecl_code = code.insertion_point()
profile = code.globalstate.directives['profile']
@@ -2360,34 +2360,34 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.globalstate.use_utility_code(UtilityCode.load_cached("Profile", "Profile.c"))
code.put_declare_refcount_context()
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
# Most extension modules simply can't deal with it, and Cython isn't ready either.
# See issues listed here: https://docs.python.org/3/c-api/init.html#sub-interpreter-support
code.putln("if (%s) {" % Naming.module_cname)
- # Hack: enforce single initialisation.
+ # Hack: enforce single initialisation.
code.putln("if (%s == %s) return 0;" % (
- Naming.module_cname,
- Naming.pymodinit_module_arg,
- ))
+ Naming.module_cname,
+ Naming.pymodinit_module_arg,
+ ))
code.putln('PyErr_SetString(PyExc_RuntimeError,'
' "Module \'%s\' has already been imported. Re-initialisation is not supported.");' %
env.module_name)
code.putln("return -1;")
code.putln("}")
- code.putln("#elif PY_MAJOR_VERSION >= 3")
- # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
- code.putln("if (%s) return __Pyx_NewRef(%s);" % (
- Naming.module_cname,
- Naming.module_cname,
- ))
- code.putln("#endif")
-
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
+ code.putln("if (%s) return __Pyx_NewRef(%s);" % (
+ Naming.module_cname,
+ Naming.module_cname,
+ ))
+ code.putln("#endif")
+
if profile or linetrace:
tempdecl_code.put_trace_declarations()
code.put_trace_frame_init()
- refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1]
- code.putln(refnanny_import_code.rstrip())
+ refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1]
+ code.putln(refnanny_import_code.rstrip())
code.put_setup_refcount_context(header3)
env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
@@ -2403,14 +2403,14 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("%s = PyUnicode_FromStringAndSize(\"\", 0); %s" % (
Naming.empty_unicode, code.error_goto_if_null(Naming.empty_unicode, self.pos)))
- for ext_type in ('CyFunction', 'FusedFunction', 'Coroutine', 'Generator', 'AsyncGen', 'StopAsyncIteration'):
+ for ext_type in ('CyFunction', 'FusedFunction', 'Coroutine', 'Generator', 'AsyncGen', 'StopAsyncIteration'):
code.putln("#ifdef __Pyx_%s_USED" % ext_type)
code.put_error_if_neg(self.pos, "__pyx_%s_init()" % ext_type)
code.putln("#endif")
code.putln("/*--- Library function declarations ---*/")
- if env.directives['np_pythran']:
- code.put_error_if_neg(self.pos, "_import_array()")
+ if env.directives['np_pythran']:
+ code.put_error_if_neg(self.pos, "_import_array()")
code.putln("/*--- Threads initialization code ---*/")
code.putln("#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 "
@@ -2446,33 +2446,33 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("/*--- Constants init code ---*/")
code.put_error_if_neg(self.pos, "__Pyx_InitCachedConstants()")
- code.putln("/*--- Global type/function init code ---*/")
-
- with subfunction("Global init code") as inner_code:
- self.generate_global_init_code(env, inner_code)
+ code.putln("/*--- Global type/function init code ---*/")
- with subfunction("Variable export code") as inner_code:
- self.generate_c_variable_export_code(env, inner_code)
+ with subfunction("Global init code") as inner_code:
+ self.generate_global_init_code(env, inner_code)
- with subfunction("Function export code") as inner_code:
- self.generate_c_function_export_code(env, inner_code)
+ with subfunction("Variable export code") as inner_code:
+ self.generate_c_variable_export_code(env, inner_code)
- with subfunction("Type init code") as inner_code:
- self.generate_type_init_code(env, inner_code)
+ with subfunction("Function export code") as inner_code:
+ self.generate_c_function_export_code(env, inner_code)
- with subfunction("Type import code") as inner_code:
- for module in imported_modules:
- self.generate_type_import_code_for_module(module, env, inner_code)
+ with subfunction("Type init code") as inner_code:
+ self.generate_type_init_code(env, inner_code)
- with subfunction("Variable import code") as inner_code:
- for module in imported_modules:
- self.generate_c_variable_import_code_for_module(module, env, inner_code)
+ with subfunction("Type import code") as inner_code:
+ for module in imported_modules:
+ self.generate_type_import_code_for_module(module, env, inner_code)
- with subfunction("Function import code") as inner_code:
- for module in imported_modules:
- self.specialize_fused_types(module)
- self.generate_c_function_import_code_for_module(module, env, inner_code)
+ with subfunction("Variable import code") as inner_code:
+ for module in imported_modules:
+ self.generate_c_variable_import_code_for_module(module, env, inner_code)
+ with subfunction("Function import code") as inner_code:
+ for module in imported_modules:
+ self.specialize_fused_types(module)
+ self.generate_c_function_import_code_for_module(module, env, inner_code)
+
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
@@ -2522,11 +2522,11 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.put_finish_refcount_context()
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
- code.putln("return (%s != NULL) ? 0 : -1;" % env.module_cname)
- code.putln("#elif PY_MAJOR_VERSION >= 3")
- code.putln("return %s;" % env.module_cname)
- code.putln("#else")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("return (%s != NULL) ? 0 : -1;" % env.module_cname)
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ code.putln("return %s;" % env.module_cname)
+ code.putln("#else")
code.putln("return;")
code.putln("#endif")
code.putln('}')
@@ -2536,86 +2536,86 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.exit_cfunc_scope()
def mod_init_subfunction(self, pos, scope, orig_code):
- """
- Return a context manager that allows deviating the module init code generation
- into a separate function and instead inserts a call to it.
-
- Can be reused sequentially to create multiple functions.
- The functions get inserted at the point where the context manager was created.
- The call gets inserted where the context manager is used (on entry).
- """
- prototypes = orig_code.insertion_point()
- prototypes.putln("")
- function_code = orig_code.insertion_point()
- function_code.putln("")
-
- class ModInitSubfunction(object):
- def __init__(self, code_type):
- cname = '_'.join(code_type.lower().split())
- assert re.match("^[a-z0-9_]+$", cname)
- self.cfunc_name = "__Pyx_modinit_%s" % cname
- self.description = code_type
- self.tempdecl_code = None
- self.call_code = None
-
- def __enter__(self):
- self.call_code = orig_code.insertion_point()
- code = function_code
- code.enter_cfunc_scope(scope)
+ """
+ Return a context manager that allows deviating the module init code generation
+ into a separate function and instead inserts a call to it.
+
+ Can be reused sequentially to create multiple functions.
+ The functions get inserted at the point where the context manager was created.
+ The call gets inserted where the context manager is used (on entry).
+ """
+ prototypes = orig_code.insertion_point()
+ prototypes.putln("")
+ function_code = orig_code.insertion_point()
+ function_code.putln("")
+
+ class ModInitSubfunction(object):
+ def __init__(self, code_type):
+ cname = '_'.join(code_type.lower().split())
+ assert re.match("^[a-z0-9_]+$", cname)
+ self.cfunc_name = "__Pyx_modinit_%s" % cname
+ self.description = code_type
+ self.tempdecl_code = None
+ self.call_code = None
+
+ def __enter__(self):
+ self.call_code = orig_code.insertion_point()
+ code = function_code
+ code.enter_cfunc_scope(scope)
prototypes.putln("static CYTHON_SMALL_CODE int %s(void); /*proto*/" % self.cfunc_name)
- code.putln("static int %s(void) {" % self.cfunc_name)
- code.put_declare_refcount_context()
- self.tempdecl_code = code.insertion_point()
- code.put_setup_refcount_context(self.cfunc_name)
- # Leave a grepable marker that makes it easy to find the generator source.
- code.putln("/*--- %s ---*/" % self.description)
- return code
-
- def __exit__(self, *args):
- code = function_code
- code.put_finish_refcount_context()
- code.putln("return 0;")
-
- self.tempdecl_code.put_temp_declarations(code.funcstate)
- self.tempdecl_code = None
-
- needs_error_handling = code.label_used(code.error_label)
- if needs_error_handling:
- code.put_label(code.error_label)
- for cname, type in code.funcstate.all_managed_temps():
- code.put_xdecref(cname, type)
- code.put_finish_refcount_context()
- code.putln("return -1;")
- code.putln("}")
- code.exit_cfunc_scope()
- code.putln("")
-
- if needs_error_handling:
+ code.putln("static int %s(void) {" % self.cfunc_name)
+ code.put_declare_refcount_context()
+ self.tempdecl_code = code.insertion_point()
+ code.put_setup_refcount_context(self.cfunc_name)
+ # Leave a grepable marker that makes it easy to find the generator source.
+ code.putln("/*--- %s ---*/" % self.description)
+ return code
+
+ def __exit__(self, *args):
+ code = function_code
+ code.put_finish_refcount_context()
+ code.putln("return 0;")
+
+ self.tempdecl_code.put_temp_declarations(code.funcstate)
+ self.tempdecl_code = None
+
+ needs_error_handling = code.label_used(code.error_label)
+ if needs_error_handling:
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.put_finish_refcount_context()
+ code.putln("return -1;")
+ code.putln("}")
+ code.exit_cfunc_scope()
+ code.putln("")
+
+ if needs_error_handling:
self.call_code.putln(
self.call_code.error_goto_if_neg("%s()" % self.cfunc_name, pos))
- else:
- self.call_code.putln("(void)%s();" % self.cfunc_name)
- self.call_code = None
-
- return ModInitSubfunction
-
+ else:
+ self.call_code.putln("(void)%s();" % self.cfunc_name)
+ self.call_code = None
+
+ return ModInitSubfunction
+
def generate_module_import_setup(self, env, code):
module_path = env.directives['set_initial_path']
if module_path == 'SOURCEFILE':
module_path = self.pos[0].filename
if module_path:
- code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
+ code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
code.putln('if (PyObject_SetAttrString(%s, "__file__", %s) < 0) %s;' % (
env.module_cname,
code.globalstate.get_py_string_const(
EncodedString(decode_filename(module_path))).cname,
code.error_goto(self.pos)))
- code.putln("}")
+ code.putln("}")
if env.is_package:
# set __path__ to mark the module as package
- code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
+ code.putln('if (!CYTHON_PEP489_MULTI_PHASE_INIT) {')
temp = code.funcstate.allocate_temp(py_object_type, True)
code.putln('%s = Py_BuildValue("[O]", %s); %s' % (
temp,
@@ -2629,12 +2629,12 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
env.module_cname, temp, code.error_goto(self.pos)))
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
- code.putln("}")
+ code.putln("}")
elif env.is_package:
# packages require __path__, so all we can do is try to figure
# out the module path at runtime by rerunning the import lookup
- code.putln("if (!CYTHON_PEP489_MULTI_PHASE_INIT) {")
+ code.putln("if (!CYTHON_PEP489_MULTI_PHASE_INIT) {")
package_name, _ = self.full_module_name.rsplit('.', 1)
if '.' in package_name:
parent_name = '"%s"' % (package_name.rsplit('.', 1)[0],)
@@ -2648,7 +2648,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.globalstate.get_py_string_const(
EncodedString(env.module_name)).cname),
self.pos))
- code.putln("}")
+ code.putln("}")
# CPython may not have put us into sys.modules yet, but relative imports and reimports require it
fq_module_name = self.full_module_name
@@ -2750,9 +2750,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
main_method=Options.embed,
wmain_method=wmain))
- def mod_init_func_cname(self, prefix, env, options=None):
+ def mod_init_func_cname(self, prefix, env, options=None):
return '%s_%s' % (prefix, options and options.init_suffix or env.module_name)
-
+
def generate_pymoduledef_struct(self, env, options, code):
if env.doc:
doc = "%s" % code.get_string_const(env.doc)
@@ -2765,35 +2765,35 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("")
code.putln("#if PY_MAJOR_VERSION >= 3")
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
- exec_func_cname = self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env)
- code.putln("static PyObject* %s(PyObject *spec, PyModuleDef *def); /*proto*/" %
- Naming.pymodule_create_func_cname)
- code.putln("static int %s(PyObject* module); /*proto*/" % exec_func_cname)
-
- code.putln("static PyModuleDef_Slot %s[] = {" % Naming.pymoduledef_slots_cname)
- code.putln("{Py_mod_create, (void*)%s}," % Naming.pymodule_create_func_cname)
- code.putln("{Py_mod_exec, (void*)%s}," % exec_func_cname)
- code.putln("{0, NULL}")
- code.putln("};")
- code.putln("#endif")
-
- code.putln("")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ exec_func_cname = self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env)
+ code.putln("static PyObject* %s(PyObject *spec, PyModuleDef *def); /*proto*/" %
+ Naming.pymodule_create_func_cname)
+ code.putln("static int %s(PyObject* module); /*proto*/" % exec_func_cname)
+
+ code.putln("static PyModuleDef_Slot %s[] = {" % Naming.pymoduledef_slots_cname)
+ code.putln("{Py_mod_create, (void*)%s}," % Naming.pymodule_create_func_cname)
+ code.putln("{Py_mod_exec, (void*)%s}," % exec_func_cname)
+ code.putln("{0, NULL}")
+ code.putln("};")
+ code.putln("#endif")
+
+ code.putln("")
code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname)
code.putln(" PyModuleDef_HEAD_INIT,")
code.putln(' "%s",' % (options.module_name or env.module_name))
code.putln(" %s, /* m_doc */" % doc)
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
- code.putln(" 0, /* m_size */")
- code.putln("#else")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln(" 0, /* m_size */")
+ code.putln("#else")
code.putln(" -1, /* m_size */")
- code.putln("#endif")
+ code.putln("#endif")
code.putln(" %s /* m_methods */," % env.method_table_cname)
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
- code.putln(" %s, /* m_slots */" % Naming.pymoduledef_slots_cname)
- code.putln("#else")
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln(" %s, /* m_slots */" % Naming.pymoduledef_slots_cname)
+ code.putln("#else")
code.putln(" NULL, /* m_reload */")
- code.putln("#endif")
+ code.putln("#endif")
code.putln(" NULL, /* m_traverse */")
code.putln(" NULL, /* m_clear */")
code.putln(" %s /* m_free */" % cleanup_func)
@@ -2807,13 +2807,13 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
doc = "%s" % code.get_string_const(env.doc)
else:
doc = "0"
-
- code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
- code.putln("%s = %s;" % (
- env.module_cname,
- Naming.pymodinit_module_arg))
- code.put_incref(env.module_cname, py_object_type, nanny=False)
- code.putln("#else")
+
+ code.putln("#if CYTHON_PEP489_MULTI_PHASE_INIT")
+ code.putln("%s = %s;" % (
+ env.module_cname,
+ Naming.pymodinit_module_arg))
+ code.put_incref(env.module_cname, py_object_type, nanny=False)
+ code.putln("#else")
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln(
'%s = Py_InitModule4("%s", %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % (
@@ -2829,8 +2829,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
Naming.pymoduledef_cname))
code.putln("#endif")
code.putln(code.error_goto_if_null(env.module_cname, self.pos))
- code.putln("#endif") # CYTHON_PEP489_MULTI_PHASE_INIT
-
+ code.putln("#endif") # CYTHON_PEP489_MULTI_PHASE_INIT
+
code.putln(
"%s = PyModule_GetDict(%s); %s" % (
env.module_dict_cname, env.module_cname,
@@ -2842,10 +2842,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
Naming.builtins_cname,
code.error_goto_if_null(Naming.builtins_cname, self.pos)))
code.put_incref(Naming.builtins_cname, py_object_type, nanny=False)
- code.putln(
- '%s = PyImport_AddModule((char *) "cython_runtime"); %s' % (
- Naming.cython_runtime_cname,
- code.error_goto_if_null(Naming.cython_runtime_cname, self.pos)))
+ code.putln(
+ '%s = PyImport_AddModule((char *) "cython_runtime"); %s' % (
+ Naming.cython_runtime_cname,
+ code.error_goto_if_null(Naming.cython_runtime_cname, self.pos)))
code.put_incref(Naming.cython_runtime_cname, py_object_type, nanny=False)
code.putln(
'if (PyObject_SetAttrString(%s, "__builtins__", %s) < 0) %s;' % (
@@ -3110,8 +3110,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(' if (!%s) %s' % (type.typeptr_cname, error_code))
- def generate_type_ready_code(self, entry, code):
- Nodes.CClassDefNode.generate_type_ready_code(entry, code)
+ def generate_type_ready_code(self, entry, code):
+ Nodes.CClassDefNode.generate_type_ready_code(entry, code)
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
@@ -3188,7 +3188,7 @@ def generate_cfunction_declaration(entry, env, code, definition):
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'public':
storage_class = Naming.extern_c_macro
- dll_linkage = None
+ dll_linkage = None
elif entry.visibility == 'private':
storage_class = "static"
dll_linkage = None
diff --git a/contrib/tools/cython/Cython/Compiler/Naming.py b/contrib/tools/cython/Cython/Compiler/Naming.py
index 2c9b620788..be555eb7d2 100644
--- a/contrib/tools/cython/Cython/Compiler/Naming.py
+++ b/contrib/tools/cython/Cython/Compiler/Naming.py
@@ -102,10 +102,10 @@ print_function = pyrex_prefix + "print"
print_function_kwargs = pyrex_prefix + "print_kwargs"
cleanup_cname = pyrex_prefix + "module_cleanup"
pymoduledef_cname = pyrex_prefix + "moduledef"
-pymoduledef_slots_cname = pyrex_prefix + "moduledef_slots"
-pymodinit_module_arg = pyrex_prefix + "pyinit_module"
-pymodule_create_func_cname = pyrex_prefix + "pymod_create"
-pymodule_exec_func_cname = pyrex_prefix + "pymod_exec"
+pymoduledef_slots_cname = pyrex_prefix + "moduledef_slots"
+pymodinit_module_arg = pyrex_prefix + "pyinit_module"
+pymodule_create_func_cname = pyrex_prefix + "pymod_create"
+pymodule_exec_func_cname = pyrex_prefix + "pymod_exec"
optional_args_cname = pyrex_prefix + "optional_args"
import_star = pyrex_prefix + "import_star"
import_star_set = pyrex_prefix + "import_star_set"
@@ -120,7 +120,7 @@ quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty tempi
tp_dict_version_temp = pyrex_prefix + "tp_dict_version"
obj_dict_version_temp = pyrex_prefix + "obj_dict_version"
type_dict_guard_temp = pyrex_prefix + "type_dict_guard"
-cython_runtime_cname = pyrex_prefix + "cython_runtime"
+cython_runtime_cname = pyrex_prefix + "cython_runtime"
global_code_object_cache_find = pyrex_prefix + 'find_code_object'
global_code_object_cache_insert = pyrex_prefix + 'insert_code_object'
diff --git a/contrib/tools/cython/Cython/Compiler/Nodes.py b/contrib/tools/cython/Cython/Compiler/Nodes.py
index 6436c5002d..0796f40c0f 100644
--- a/contrib/tools/cython/Cython/Compiler/Nodes.py
+++ b/contrib/tools/cython/Cython/Compiler/Nodes.py
@@ -10,7 +10,7 @@ cython.declare(sys=object, os=object, copy=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
- error_type=object, _py_int_types=object)
+ error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
@@ -28,7 +28,7 @@ from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
-from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
+from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
from ..Utils import add_metaclass
@@ -39,7 +39,7 @@ else:
def relative_position(pos):
- return (pos[0].get_filenametable_entry(), pos[1])
+ return (pos[0].get_filenametable_entry(), pos[1])
def embed_position(pos, docstring):
@@ -68,13 +68,13 @@ def embed_position(pos, docstring):
return doc
-def analyse_type_annotation(annotation, env, assigned_value=None):
+def analyse_type_annotation(annotation, env, assigned_value=None):
base_type = None
- is_ambiguous = False
+ is_ambiguous = False
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
- warning(annotation.pos,
- "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
+ warning(annotation.pos,
+ "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
@@ -88,30 +88,30 @@ def analyse_type_annotation(annotation, env, assigned_value=None):
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
- if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
- # Map builtin numeric Python types to C types in safe cases.
- if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
- assigned_type = assigned_value.infer_type(env)
- if assigned_type and assigned_type.is_pyobject:
- # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
- is_ambiguous = True
- arg_type = None
- # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
- if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
- arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
- elif arg_type is not None and annotation.is_string_literal:
- warning(annotation.pos,
- "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
+ if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
+ # Map builtin numeric Python types to C types in safe cases.
+ if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
+ assigned_type = assigned_value.infer_type(env)
+ if assigned_type and assigned_type.is_pyobject:
+ # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
+ is_ambiguous = True
+ arg_type = None
+ # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
+ if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
+ arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
+ elif arg_type is not None and annotation.is_string_literal:
+ warning(annotation.pos,
+ "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
- elif is_ambiguous:
- warning(annotation.pos, "Ambiguous types in annotation, ignoring")
+ elif is_ambiguous:
+ warning(annotation.pos, "Ambiguous types in annotation, ignoring")
else:
- warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
+ warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
return base_type, arg_type
@@ -474,9 +474,9 @@ class StatNode(Node):
class CDefExternNode(StatNode):
- # include_file string or None
- # verbatim_include string or None
- # body StatListNode
+ # include_file string or None
+ # verbatim_include string or None
+ # body StatListNode
child_attrs = ["body"]
@@ -486,18 +486,18 @@ class CDefExternNode(StatNode):
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
- if self.include_file or self.verbatim_include:
- # Determine whether include should be late
- stats = self.body.stats
- if not env.directives['preliminary_late_includes_cy28']:
- late = False
- elif not stats:
- # Special case: empty 'cdef extern' blocks are early
- late = False
- else:
- late = all(isinstance(node, CVarDefNode) for node in stats)
- env.add_include_file(self.include_file, self.verbatim_include, late)
-
+ if self.include_file or self.verbatim_include:
+ # Determine whether include should be late
+ stats = self.body.stats
+ if not env.directives['preliminary_late_includes_cy28']:
+ late = False
+ elif not stats:
+ # Special case: empty 'cdef extern' blocks are early
+ late = False
+ else:
+ late = all(isinstance(node, CVarDefNode) for node in stats)
+ env.add_include_file(self.include_file, self.verbatim_include, late)
+
def analyse_expressions(self, env):
return self
@@ -539,7 +539,7 @@ class CNameDeclaratorNode(CDeclaratorNode):
default = None
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
@@ -565,11 +565,11 @@ class CPtrDeclaratorNode(CDeclaratorNode):
def analyse_templates(self):
return self.base.analyse_templates()
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
- return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CReferenceDeclaratorNode(CDeclaratorNode):
@@ -580,11 +580,11 @@ class CReferenceDeclaratorNode(CDeclaratorNode):
def analyse_templates(self):
return self.base.analyse_templates()
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
- return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CArrayDeclaratorNode(CDeclaratorNode):
@@ -593,7 +593,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
child_attrs = ["base", "dimension"]
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
@@ -607,7 +607,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
base_type = error_type
else:
base_type = base_type.specialize_here(self.pos, values)
- return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
@@ -628,7 +628,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
if base_type.is_cfunction:
error(self.pos, "Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
- return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CFuncDeclaratorNode(CDeclaratorNode):
@@ -671,7 +671,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
else:
return None
- def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
+ def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
if directive_locals is None:
directive_locals = {}
if nonempty:
@@ -723,16 +723,16 @@ class CFuncDeclaratorNode(CDeclaratorNode):
and self.exception_check != '+'):
error(self.pos, "Exception clause not allowed for function returning Python object")
else:
- if self.exception_value is None and self.exception_check and self.exception_check != '+':
- # Use an explicit exception return value to speed up exception checks.
- # Even if it is not declared, we can use the default exception value of the return type,
- # unless the function is some kind of external function that we do not control.
- if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
- # Extension types are more difficult because the signature must match the base type signature.
- if not env.is_c_class_scope:
- from .ExprNodes import ConstNode
- self.exception_value = ConstNode(
- self.pos, value=return_type.exception_value, type=return_type)
+ if self.exception_value is None and self.exception_check and self.exception_check != '+':
+ # Use an explicit exception return value to speed up exception checks.
+ # Even if it is not declared, we can use the default exception value of the return type,
+ # unless the function is some kind of external function that we do not control.
+ if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
+ # Extension types are more difficult because the signature must match the base type signature.
+ if not env.is_c_class_scope:
+ from .ExprNodes import ConstNode
+ self.exception_value = ConstNode(
+ self.pos, value=return_type.exception_value, type=return_type)
if self.exception_value:
self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
@@ -789,7 +789,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
- return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
"""
@@ -803,7 +803,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
- scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
+ scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
@@ -829,12 +829,12 @@ class CConstDeclaratorNode(CDeclaratorNode):
child_attrs = ["base"]
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
const = PyrexTypes.c_const_type(base_type)
- return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CArgDeclNode(Node):
@@ -905,8 +905,8 @@ class CArgDeclNode(Node):
base_type = base_type.base_type
# inject type declaration from annotations
- # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
- if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
+ # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
+ if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
arg_type = self.inject_type_from_annotations(env)
if arg_type is not None:
base_type = arg_type
@@ -918,7 +918,7 @@ class CArgDeclNode(Node):
annotation = self.annotation
if not annotation:
return None
- base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
+ base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
if base_type is not None:
self.base_type = base_type
return arg_type
@@ -1155,7 +1155,7 @@ class TemplatedTypeNode(CBaseTypeNode):
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
- type = error_type
+ type = error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
@@ -1176,8 +1176,8 @@ class TemplatedTypeNode(CBaseTypeNode):
for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options)
- if has_np_pythran(env) and is_pythran_buffer(self.type):
- self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
+ if has_np_pythran(env) and is_pythran_buffer(self.type):
+ self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
else:
# Array
@@ -1352,11 +1352,11 @@ class CVarDefNode(StatNode):
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
- name_declarator, type = declarator.analyse(
- base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
+ name_declarator, type = declarator.analyse(
+ base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
else:
- name_declarator, type = declarator.analyse(
- base_type, env, visibility=visibility, in_pxd=self.in_pxd)
+ name_declarator, type = declarator.analyse(
+ base_type, env, visibility=visibility, in_pxd=self.in_pxd)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos, "Variable type '%s' is incomplete" % type)
@@ -1367,8 +1367,8 @@ class CVarDefNode(StatNode):
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
- if type.is_reference and self.visibility != 'extern':
- error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
+ if type.is_reference and self.visibility != 'extern':
+ error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
@@ -1611,8 +1611,8 @@ class CTypeDefNode(StatNode):
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
- name_declarator, type = self.declarator.analyse(
- base, env, visibility=self.visibility, in_pxd=self.in_pxd)
+ name_declarator, type = self.declarator.analyse(
+ base, env, visibility=self.visibility, in_pxd=self.in_pxd)
name = name_declarator.name
cname = name_declarator.cname
@@ -1684,18 +1684,18 @@ class FuncDefNode(StatNode, BlockNode):
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
- def analyse_annotation(self, env, annotation):
- # Annotations can not only contain valid Python expressions but arbitrary type references.
- if annotation is None:
- return None
- if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
- annotation = annotation.analyse_types(env)
- return annotation
-
+ def analyse_annotation(self, env, annotation):
+ # Annotations can not only contain valid Python expressions but arbitrary type references.
+ if annotation is None:
+ return None
+ if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
+ annotation = annotation.analyse_types(env)
+ return annotation
+
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
- arg.annotation = self.analyse_annotation(env, arg.annotation)
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
def align_argument_type(self, env, arg):
# @cython.locals()
@@ -1869,16 +1869,16 @@ class FuncDefNode(StatNode, BlockNode):
code.declare_gilstate()
if profile or linetrace:
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- tempvardecl_code.put_trace_declarations()
- code_object = self.code_object.calculate_result_code(code) if self.code_object else None
- code.put_trace_frame_init(code_object)
-
- # ----- Special check for getbuffer
- if is_getbuffer_slot:
- self.getbuffer_check(code)
-
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ tempvardecl_code.put_trace_declarations()
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
+
+ # ----- Special check for getbuffer
+ if is_getbuffer_slot:
+ self.getbuffer_check(code)
+
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
@@ -1904,8 +1904,8 @@ class FuncDefNode(StatNode, BlockNode):
# Scope unconditionally DECREFed on return.
code.putln("%s = %s;" % (
Naming.cur_scope_cname,
- lenv.scope_class.type.cast_code("Py_None")))
- code.put_incref("Py_None", py_object_type)
+ lenv.scope_class.type.cast_code("Py_None")))
+ code.put_incref("Py_None", py_object_type)
code.putln(code.error_goto(self.pos))
code.putln("} else {")
code.put_gotref(Naming.cur_scope_cname)
@@ -1932,14 +1932,14 @@ class FuncDefNode(StatNode, BlockNode):
if profile or linetrace:
# this looks a bit late, but if we don't get here due to a
# fatal error before hand, it's not really worth tracing
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- if self.is_wrapper:
- trace_name = self.entry.name + " (wrapper)"
- else:
- trace_name = self.entry.name
- code.put_trace_call(
- trace_name, self.pos, nogil=not code.funcstate.gil_owned)
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.is_wrapper:
+ trace_name = self.entry.name + " (wrapper)"
+ else:
+ trace_name = self.entry.name
+ code.put_trace_call(
+ trace_name, self.pos, nogil=not code.funcstate.gil_owned)
code.funcstate.can_trace = True
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
@@ -1952,7 +1952,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_var_incref(entry)
# Note: defaults are always incref-ed. For def functions, we
- # we acquire arguments from object conversion, so we have
+ # we acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
@@ -2001,8 +2001,8 @@ class FuncDefNode(StatNode, BlockNode):
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
- elif not self.return_type.is_void:
- code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
@@ -2058,8 +2058,8 @@ class FuncDefNode(StatNode, BlockNode):
if err_val is not None:
if err_val != Naming.retval_cname:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
- elif not self.return_type.is_void:
- code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
@@ -2141,14 +2141,14 @@ class FuncDefNode(StatNode, BlockNode):
if profile or linetrace:
code.funcstate.can_trace = False
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- if self.return_type.is_pyobject:
- code.put_trace_return(
- Naming.retval_cname, nogil=not code.funcstate.gil_owned)
- else:
- code.put_trace_return(
- "Py_None", nogil=not code.funcstate.gil_owned)
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.return_type.is_pyobject:
+ code.put_trace_return(
+ Naming.retval_cname, nogil=not code.funcstate.gil_owned)
+ else:
+ code.put_trace_return(
+ "Py_None", nogil=not code.funcstate.gil_owned)
if not lenv.nogil:
# GIL holding function
@@ -2181,10 +2181,10 @@ class FuncDefNode(StatNode, BlockNode):
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
- entry = env.declare_arg(arg.name, arg.type, arg.pos)
- if arg.annotation:
- entry.annotation = arg.annotation
- return entry
+ entry = env.declare_arg(arg.name, arg.type, arg.pos)
+ if arg.annotation:
+ entry.annotation = arg.annotation
+ return entry
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
@@ -2230,59 +2230,59 @@ class FuncDefNode(StatNode, BlockNode):
#
# Special code for the __getbuffer__ function
#
- def _get_py_buffer_info(self):
- py_buffer = self.local_scope.arg_entries[1]
- try:
- # Check builtin definition of struct Py_buffer
- obj_type = py_buffer.type.base_type.scope.entries['obj'].type
- except (AttributeError, KeyError):
- # User code redeclared struct Py_buffer
- obj_type = None
- return py_buffer, obj_type
-
- # Old Python 3 used to support write-locks on buffer-like objects by
- # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
- # feature is obsolete, it was almost never used (only one instance in
- # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
- # (see bpo-14203). We add an extra check here to prevent legacy code from
- # from trying to use the feature and prevent segmentation faults.
- def getbuffer_check(self, code):
- py_buffer, _ = self._get_py_buffer_info()
- view = py_buffer.cname
- code.putln("if (%s == NULL) {" % view)
- code.putln("PyErr_SetString(PyExc_BufferError, "
- "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
- code.putln("return -1;")
+ def _get_py_buffer_info(self):
+ py_buffer = self.local_scope.arg_entries[1]
+ try:
+ # Check builtin definition of struct Py_buffer
+ obj_type = py_buffer.type.base_type.scope.entries['obj'].type
+ except (AttributeError, KeyError):
+ # User code redeclared struct Py_buffer
+ obj_type = None
+ return py_buffer, obj_type
+
+ # Old Python 3 used to support write-locks on buffer-like objects by
+ # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
+ # feature is obsolete, it was almost never used (only one instance in
+ # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
+ # (see bpo-14203). We add an extra check here to prevent legacy code from
+ # from trying to use the feature and prevent segmentation faults.
+ def getbuffer_check(self, code):
+ py_buffer, _ = self._get_py_buffer_info()
+ view = py_buffer.cname
+ code.putln("if (%s == NULL) {" % view)
+ code.putln("PyErr_SetString(PyExc_BufferError, "
+ "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
+ code.putln("return -1;")
code.putln("}")
- def getbuffer_init(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.put_init_to_py_none("%s->obj" % view, obj_type)
- code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
- else:
- code.putln("%s->obj = NULL;" % view)
-
+ def getbuffer_init(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.put_init_to_py_none("%s->obj" % view, obj_type)
+ code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ else:
+ code.putln("%s->obj = NULL;" % view)
+
def getbuffer_error_cleanup(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.putln("if (%s->obj != NULL) {" % view)
- code.put_gotref("%s->obj" % view)
- code.put_decref_clear("%s->obj" % view, obj_type)
- code.putln("}")
- else:
- code.putln("Py_CLEAR(%s->obj);" % view)
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj != NULL) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+ else:
+ code.putln("Py_CLEAR(%s->obj);" % view)
def getbuffer_normal_cleanup(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.putln("if (%s->obj == Py_None) {" % view)
- code.put_gotref("%s->obj" % view)
- code.put_decref_clear("%s->obj" % view, obj_type)
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj == Py_None) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
@@ -2358,10 +2358,10 @@ class CFuncDefNode(FuncDefNode):
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None),
- directive_locals=self.directive_locals, visibility=self.visibility)
+ directive_locals=self.directive_locals, visibility=self.visibility)
else:
name_declarator, type = self.declarator.analyse(
- base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
+ base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
if not type.is_cfunction:
error(self.pos, "Suite attached to non-function declaration")
# Remember the actual type according to the function header
@@ -2400,7 +2400,7 @@ class CFuncDefNode(FuncDefNode):
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
- if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
+ if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
@@ -2752,9 +2752,9 @@ class DefNode(FuncDefNode):
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
outer_attrs = ["decorators", "return_type_annotation"]
- is_staticmethod = False
- is_classmethod = False
-
+ is_staticmethod = False
+ is_classmethod = False
+
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
@@ -2797,22 +2797,22 @@ class DefNode(FuncDefNode):
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
- exception_value, exception_check = except_val or (None, False)
-
+ exception_value, exception_check = except_val or (None, False)
+
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
- annotation=formal_arg.annotation,
+ annotation=formal_arg.annotation,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
args=cfunc_args,
has_varargs=False,
exception_value=None,
- exception_check=exception_check,
+ exception_check=exception_check,
nogil=nogil,
with_gil=with_gil,
is_overridable=overridable)
@@ -2830,10 +2830,10 @@ class DefNode(FuncDefNode):
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
-
- if exception_value is None and cfunc_type.exception_value is not None:
- from .ExprNodes import ConstNode
- exception_value = ConstNode(
+
+ if exception_value is None and cfunc_type.exception_value is not None:
+ from .ExprNodes import ConstNode
+ exception_value = ConstNode(
self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
@@ -2898,7 +2898,7 @@ class DefNode(FuncDefNode):
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
- _, return_type = analyse_type_annotation(self.return_type_annotation, env)
+ _, return_type = analyse_type_annotation(self.return_type_annotation, env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
@@ -2926,13 +2926,13 @@ class DefNode(FuncDefNode):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
- # If we hare in pythran mode and we got a buffer supported by
- # Pythran, we change this node to a fused type
- if has_np_pythran(env) and base_type.is_pythran_expr:
- base_type = PyrexTypes.FusedType([
- base_type,
- #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
- base_type.org_buffer])
+ # If we hare in pythran mode and we got a buffer supported by
+ # Pythran, we change this node to a fused type
+ if has_np_pythran(env) and base_type.is_pythran_expr:
+ base_type = PyrexTypes.FusedType([
+ base_type,
+ #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
+ base_type.org_buffer])
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
@@ -2973,11 +2973,11 @@ class DefNode(FuncDefNode):
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
- if has_np_pythran(env):
- self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
- else:
- self.np_args_idx = []
-
+ if has_np_pythran(env):
+ self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
+ else:
+ self.np_args_idx = []
+
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
@@ -3133,7 +3133,7 @@ class DefNode(FuncDefNode):
self.analyse_default_values(env)
self.analyse_annotations(env)
if self.return_type_annotation:
- self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
+ self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
@@ -3208,10 +3208,10 @@ class DefNode(FuncDefNode):
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
- if arg_code_list:
- arg_code = ', '.join(arg_code_list)
- else:
- arg_code = 'void' # No arguments
+ if arg_code_list:
+ arg_code = ', '.join(arg_code_list)
+ else:
+ arg_code = 'void' # No arguments
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
@@ -3276,8 +3276,8 @@ class DefNodeWrapper(FuncDefNode):
self.signature = target_entry.signature
- self.np_args_idx = self.target.np_args_idx
-
+ self.np_args_idx = self.target.np_args_idx
+
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
@@ -3705,8 +3705,8 @@ class DefNodeWrapper(FuncDefNode):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
@@ -3730,12 +3730,12 @@ class DefNodeWrapper(FuncDefNode):
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
- if i != reversed_args[0][0]:
- code.putln('CYTHON_FALLTHROUGH;')
+ if i != reversed_args[0][0]:
+ code.putln('CYTHON_FALLTHROUGH;')
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
- code.putln('CYTHON_FALLTHROUGH;')
+ code.putln('CYTHON_FALLTHROUGH;')
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
@@ -3777,12 +3777,12 @@ class DefNodeWrapper(FuncDefNode):
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
- if arg.type.from_py_function:
+ if arg.type.from_py_function:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
- code.putln(arg.type.from_py_call_code(
- item, arg.entry.cname, arg.pos, code))
+ code.putln(arg.type.from_py_call_code(
+ item, arg.entry.cname, arg.pos, code))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
@@ -3855,7 +3855,7 @@ class DefNodeWrapper(FuncDefNode):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
- code.putln('CYTHON_FALLTHROUGH;')
+ code.putln('CYTHON_FALLTHROUGH;')
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
@@ -3883,8 +3883,8 @@ class DefNodeWrapper(FuncDefNode):
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
- if i != 0:
- code.putln('CYTHON_FALLTHROUGH;')
+ if i != 0:
+ code.putln('CYTHON_FALLTHROUGH;')
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
@@ -3896,12 +3896,12 @@ class DefNodeWrapper(FuncDefNode):
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
- code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
+ code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
@@ -3922,8 +3922,8 @@ class DefNodeWrapper(FuncDefNode):
code.putln('}')
elif arg.kw_only:
code.putln('else {')
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
@@ -3987,7 +3987,7 @@ class DefNodeWrapper(FuncDefNode):
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
@@ -4024,13 +4024,13 @@ class DefNodeWrapper(FuncDefNode):
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
# copied from CoerceFromPyTypeNode
- if new_type.from_py_function:
- code.putln(new_type.from_py_call_code(
- arg.hdr_cname,
- arg.entry.cname,
- arg.pos,
- code,
- ))
+ if new_type.from_py_function:
+ code.putln(new_type.from_py_call_code(
+ arg.hdr_cname,
+ arg.entry.cname,
+ arg.pos,
+ code,
+ ))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
@@ -4071,9 +4071,9 @@ class GeneratorDefNode(DefNode):
is_generator = True
is_coroutine = False
- is_iterable_coroutine = False
- is_asyncgen = False
- gen_type_name = 'Generator'
+ is_iterable_coroutine = False
+ is_asyncgen = False
+ gen_type_name = 'Generator'
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
@@ -4096,10 +4096,10 @@ class GeneratorDefNode(DefNode):
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
- '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
- self.gen_type_name,
- body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
- Naming.cur_scope_cname, name, qualname, module_name,
+ '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
+ self.gen_type_name,
+ body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
+ Naming.cur_scope_cname, name, qualname, module_name,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
@@ -4113,40 +4113,40 @@ class GeneratorDefNode(DefNode):
code.putln('}')
def generate_function_definitions(self, env, code):
- env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c"))
+ env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c"))
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class AsyncDefNode(GeneratorDefNode):
- gen_type_name = 'Coroutine'
+ gen_type_name = 'Coroutine'
is_coroutine = True
-class IterableAsyncDefNode(AsyncDefNode):
- gen_type_name = 'IterableCoroutine'
- is_iterable_coroutine = True
-
-
-class AsyncGenNode(AsyncDefNode):
- gen_type_name = 'AsyncGen'
- is_asyncgen = True
-
-
+class IterableAsyncDefNode(AsyncDefNode):
+ gen_type_name = 'IterableCoroutine'
+ is_iterable_coroutine = True
+
+
+class AsyncGenNode(AsyncDefNode):
+ gen_type_name = 'AsyncGen'
+ is_asyncgen = True
+
+
class GeneratorBodyDefNode(DefNode):
# Main code body of a generator implemented as a DefNode.
#
is_generator_body = True
is_inlined = False
- is_async_gen_body = False
+ is_async_gen_body = False
inlined_comprehension_type = None # container type for inlined comprehensions
- def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False):
+ def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False):
super(GeneratorBodyDefNode, self).__init__(
- pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body,
- doc=None, args=[], star_arg=None, starstar_arg=None)
+ pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body,
+ doc=None, args=[], star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
@@ -4167,10 +4167,10 @@ class GeneratorBodyDefNode(DefNode):
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
- header = "static PyObject *%s(PyObject *%s_obj, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % (
+ header = "static PyObject *%s(PyObject *%s_obj, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % (
self.entry.func_cname,
Naming.generator_cname,
- Naming.local_tstate_cname,
+ Naming.local_tstate_cname,
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
@@ -4199,14 +4199,14 @@ class GeneratorBodyDefNode(DefNode):
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
- code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name)
- profile = code.globalstate.directives['profile']
- linetrace = code.globalstate.directives['linetrace']
- if profile or linetrace:
- tempvardecl_code.put_trace_declarations()
- code.funcstate.can_trace = True
- code_object = self.code_object.calculate_result_code(code) if self.code_object else None
- code.put_trace_frame_init(code_object)
+ code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ tempvardecl_code.put_trace_declarations()
+ code.funcstate.can_trace = True
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
@@ -4237,7 +4237,7 @@ class GeneratorBodyDefNode(DefNode):
# ----- Function body
self.generate_function_body(env, code)
# ----- Closure initialization
- if lenv.scope_class.type.scope.var_entries:
+ if lenv.scope_class.type.scope.var_entries:
closure_init_code.putln('%s = %s;' % (
lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
lenv.scope_class.type.cast_code('%s->closure' %
@@ -4245,9 +4245,9 @@ class GeneratorBodyDefNode(DefNode):
# FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
- if profile or linetrace:
- code.funcstate.can_trace = False
-
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
@@ -4255,13 +4255,13 @@ class GeneratorBodyDefNode(DefNode):
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
- if self.is_async_gen_body:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
- code.putln('PyErr_SetNone(%s);' % (
- '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
+ if self.is_async_gen_body:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.putln('PyErr_SetNone(%s);' % (
+ '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
# ----- Error cleanup
- if code.label_used(code.error_label):
+ if code.label_used(code.error_label):
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
@@ -4270,7 +4270,7 @@ class GeneratorBodyDefNode(DefNode):
if Future.generator_stop in env.global_scope().context.future_directives:
# PEP 479: turn accidental StopIteration exceptions into a RuntimeError
code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
- code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body))
+ code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body))
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
@@ -4283,14 +4283,14 @@ class GeneratorBodyDefNode(DefNode):
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
# For Py3.7, clearing is already done below.
code.putln("#if !CYTHON_USE_EXC_INFO_STACK")
- code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
code.putln("#endif")
code.putln('%s->resume_label = -1;' % Naming.generator_cname)
# clean up as early as possible to help breaking any reference cycles
code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
- if profile or linetrace:
- code.put_trace_return(Naming.retval_cname,
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
@@ -4298,20 +4298,20 @@ class GeneratorBodyDefNode(DefNode):
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
- if profile or linetrace:
- resume_code.put_trace_call(self.entry.qualified_name, self.pos,
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ resume_code.put_trace_call(self.entry.qualified_name, self.pos,
+ nogil=not code.funcstate.gil_owned)
resume_code.putln("switch (%s->resume_label) {" % (
Naming.generator_cname))
-
+
resume_code.putln("case 0: goto %s;" % first_run_label)
for i, label in code.yield_labels:
resume_code.putln("case %d: goto %s;" % (i, label))
resume_code.putln("default: /* CPython raises the right error here */")
- if profile or linetrace:
- resume_code.put_trace_return("Py_None",
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ resume_code.put_trace_return("Py_None",
+ nogil=not code.funcstate.gil_owned)
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;")
resume_code.putln("}")
@@ -4321,7 +4321,7 @@ class GeneratorBodyDefNode(DefNode):
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overridden.
+ # is overridden.
#
# py_func
#
@@ -4539,7 +4539,7 @@ class PyClassDefNode(ClassDefNode):
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
- from . import ExprNodes
+ from . import ExprNodes
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
@@ -4651,7 +4651,7 @@ class CClassDefNode(ClassDefNode):
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
- # bases TupleNode Base class(es)
+ # bases TupleNode Base class(es)
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
@@ -4734,34 +4734,34 @@ class CClassDefNode(ClassDefNode):
self.module.has_extern_class = 1
env.add_imported_module(self.module)
- if self.bases.args:
- base = self.bases.args[0]
- base_type = base.analyse_as_type(env)
- if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
- # Use the Python rather than C variant of these types.
- base_type = env.lookup(base_type.sign_and_name()).type
- if base_type is None:
- error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
- elif base_type == PyrexTypes.py_object_type:
- base_class_scope = None
- elif not base_type.is_extension_type and \
- not (base_type.is_builtin_type and base_type.objstruct_cname):
- error(base.pos, "'%s' is not an extension type" % base_type)
- elif not base_type.is_complete():
- error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
- base_type.name, self.class_name))
- elif base_type.scope and base_type.scope.directives and \
- base_type.is_final_type:
- error(base.pos, "Base class '%s' of type '%s' is final" % (
- base_type, self.class_name))
- elif base_type.is_builtin_type and \
- base_type.name in ('tuple', 'str', 'bytes'):
- error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
- % base_type.name)
+ if self.bases.args:
+ base = self.bases.args[0]
+ base_type = base.analyse_as_type(env)
+ if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
+ # Use the Python rather than C variant of these types.
+ base_type = env.lookup(base_type.sign_and_name()).type
+ if base_type is None:
+ error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
+ elif base_type == PyrexTypes.py_object_type:
+ base_class_scope = None
+ elif not base_type.is_extension_type and \
+ not (base_type.is_builtin_type and base_type.objstruct_cname):
+ error(base.pos, "'%s' is not an extension type" % base_type)
+ elif not base_type.is_complete():
+ error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
+ base_type.name, self.class_name))
+ elif base_type.scope and base_type.scope.directives and \
+ base_type.is_final_type:
+ error(base.pos, "Base class '%s' of type '%s' is final" % (
+ base_type, self.class_name))
+ elif base_type.is_builtin_type and \
+ base_type.name in ('tuple', 'str', 'bytes'):
+ error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
+ % base_type.name)
else:
- self.base_type = base_type
- if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
- warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
+ self.base_type = base_type
+ if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
+ warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
@@ -4822,28 +4822,28 @@ class CClassDefNode(ClassDefNode):
else:
scope.implemented = 1
- if len(self.bases.args) > 1:
- if not has_body or self.in_pxd:
- error(self.bases.args[1].pos, "Only declare first base in declaration.")
- # At runtime, we check that the other bases are heap types
- # and that a __dict__ is added if required.
- for other_base in self.bases.args[1:]:
- if other_base.analyse_as_type(env):
- error(other_base.pos, "Only one extension type base class allowed.")
- self.entry.type.early_init = 0
- from . import ExprNodes
- self.type_init_args = ExprNodes.TupleNode(
- self.pos,
- args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
- self.bases,
- ExprNodes.DictNode(self.pos, key_value_pairs=[])])
- elif self.base_type:
- self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
- self.type_init_args = None
- else:
- self.entry.type.early_init = 1
- self.type_init_args = None
-
+ if len(self.bases.args) > 1:
+ if not has_body or self.in_pxd:
+ error(self.bases.args[1].pos, "Only declare first base in declaration.")
+ # At runtime, we check that the other bases are heap types
+ # and that a __dict__ is added if required.
+ for other_base in self.bases.args[1:]:
+ if other_base.analyse_as_type(env):
+ error(other_base.pos, "Only one extension type base class allowed.")
+ self.entry.type.early_init = 0
+ from . import ExprNodes
+ self.type_init_args = ExprNodes.TupleNode(
+ self.pos,
+ args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
+ self.bases,
+ ExprNodes.DictNode(self.pos, key_value_pairs=[])])
+ elif self.base_type:
+ self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
+ self.type_init_args = None
+ else:
+ self.entry.type.early_init = 1
+ self.type_init_args = None
+
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
@@ -4853,8 +4853,8 @@ class CClassDefNode(ClassDefNode):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
- if self.type_init_args:
- self.type_init_args.analyse_expressions(env)
+ if self.type_init_args:
+ self.type_init_args.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
@@ -4868,175 +4868,175 @@ class CClassDefNode(ClassDefNode):
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
- if not self.entry.type.early_init:
- if self.type_init_args:
- self.type_init_args.generate_evaluation_code(code)
- bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
- first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
- # Let Python do the base types compatibility checking.
- trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
- code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
- trial_type, self.type_init_args.result()))
- code.putln(code.error_goto_if_null(trial_type, self.pos))
- code.put_gotref(trial_type)
- code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
- trial_type, first_base))
- code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
- code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
- trial_type, first_base))
- code.putln(code.error_goto(self.pos))
- code.putln("}")
- code.funcstate.release_temp(trial_type)
- code.put_incref(bases, PyrexTypes.py_object_type)
- code.put_giveref(bases)
- code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
- code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
- self.type_init_args.generate_disposal_code(code)
- self.type_init_args.free_temps(code)
-
- self.generate_type_ready_code(self.entry, code, True)
-
- # Also called from ModuleNode for early init types.
- @staticmethod
- def generate_type_ready_code(entry, code, heap_type_bases=False):
- # Generate a call to PyType_Ready for an extension
- # type defined in this module.
- type = entry.type
- typeobj_cname = type.typeobj_cname
- scope = type.scope
- if not scope: # could be None if there was an error
- return
- if entry.visibility != 'extern':
- for slot in TypeSlots.slot_table:
- slot.generate_dynamic_init_code(scope, code)
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
- readyfunc = "__Pyx_PyType_Ready"
- else:
- readyfunc = "PyType_Ready"
- code.putln(
- "if (%s(&%s) < 0) %s" % (
- readyfunc,
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Don't inherit tp_print from builtin types, restoring the
- # behavior of using tp_repr or tp_str instead.
+ if not self.entry.type.early_init:
+ if self.type_init_args:
+ self.type_init_args.generate_evaluation_code(code)
+ bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
+ # Let Python do the base types compatibility checking.
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
+ code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type, self.type_init_args.result()))
+ code.putln(code.error_goto_if_null(trial_type, self.pos))
+ code.put_gotref(trial_type)
+ code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ trial_type, first_base))
+ code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
+ code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
+ trial_type, first_base))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(trial_type)
+ code.put_incref(bases, PyrexTypes.py_object_type)
+ code.put_giveref(bases)
+ code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+ code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ self.type_init_args.generate_disposal_code(code)
+ self.type_init_args.free_temps(code)
+
+ self.generate_type_ready_code(self.entry, code, True)
+
+ # Also called from ModuleNode for early init types.
+ @staticmethod
+ def generate_type_ready_code(entry, code, heap_type_bases=False):
+ # Generate a call to PyType_Ready for an extension
+ # type defined in this module.
+ type = entry.type
+ typeobj_cname = type.typeobj_cname
+ scope = type.scope
+ if not scope: # could be None if there was an error
+ return
+ if entry.visibility != 'extern':
+ for slot in TypeSlots.slot_table:
+ slot.generate_dynamic_init_code(scope, code)
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ readyfunc = "__Pyx_PyType_Ready"
+ else:
+ readyfunc = "PyType_Ready"
+ code.putln(
+ "if (%s(&%s) < 0) %s" % (
+ readyfunc,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Don't inherit tp_print from builtin types, restoring the
+ # behavior of using tp_repr or tp_str instead.
# ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1)
code.putln("#if PY_VERSION_HEX < 0x030800B1")
- code.putln("%s.tp_print = 0;" % typeobj_cname)
+ code.putln("%s.tp_print = 0;" % typeobj_cname)
code.putln("#endif")
-
- # Use specialised attribute lookup for types with generic lookup but no instance dict.
- getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
- dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
- if getattr_slot_func == '0' and dictoffset_slot_func == '0':
- if type.is_final_type:
- py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
- utility_func = "PyObject_GenericGetAttrNoDict"
- else:
- py_cfunc = "__Pyx_PyObject_GenericGetAttr"
- utility_func = "PyObject_GenericGetAttr"
- code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
-
- code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
- " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
- typeobj_cname, typeobj_cname))
- code.putln("%s.tp_getattro = %s;" % (
- typeobj_cname, py_cfunc))
- code.putln("}")
-
- # Fix special method docstrings. This is a bit of a hack, but
- # unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
- for func in entry.type.scope.pyfunc_entries:
- is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
- if (func.is_special and Options.docstrings and
- func.wrapperbase_cname and not is_buffer):
- slot = TypeSlots.method_name_to_slot.get(func.name)
- preprocessor_guard = slot.preprocessor_guard_code() if slot else None
- if preprocessor_guard:
- code.putln(preprocessor_guard)
- code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
- code.putln("{")
- code.putln(
- 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
- typeobj_cname,
- func.name,
- code.error_goto_if_null('wrapper', entry.pos)))
- code.putln(
- "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
- code.putln(
- "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
- func.wrapperbase_cname))
- code.putln(
- "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
- code.putln(
- "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
- func.wrapperbase_cname))
- code.putln("}")
- code.putln("}")
- code.putln('#endif')
- if preprocessor_guard:
- code.putln('#endif')
- if type.vtable_cname:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
- code.putln(
- "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
- typeobj_cname,
- type.vtabptr_cname,
- code.error_goto(entry.pos)))
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
- code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
- typeobj_cname,
- code.error_goto(entry.pos)))
+
+ # Use specialised attribute lookup for types with generic lookup but no instance dict.
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
+ dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
+ if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ if type.is_final_type:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
+ utility_func = "PyObject_GenericGetAttrNoDict"
+ else:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ utility_func = "PyObject_GenericGetAttr"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
+
+ code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
+ " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeobj_cname, typeobj_cname))
+ code.putln("%s.tp_getattro = %s;" % (
+ typeobj_cname, py_cfunc))
+ code.putln("}")
+
+ # Fix special method docstrings. This is a bit of a hack, but
+ # unless we let PyType_Ready create the slot wrappers we have
+ # a significant performance hit. (See trac #561.)
+ for func in entry.type.scope.pyfunc_entries:
+ is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
+ if (func.is_special and Options.docstrings and
+ func.wrapperbase_cname and not is_buffer):
+ slot = TypeSlots.method_name_to_slot.get(func.name)
+ preprocessor_guard = slot.preprocessor_guard_code() if slot else None
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("{")
+ code.putln(
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
+ typeobj_cname,
+ func.name,
+ code.error_goto_if_null('wrapper', entry.pos)))
+ code.putln(
+ "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ code.putln(
+ "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
+ func.wrapperbase_cname))
+ code.putln(
+ "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
+ code.putln(
+ "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
+ func.wrapperbase_cname))
+ code.putln("}")
+ code.putln("}")
+ code.putln('#endif')
+ if preprocessor_guard:
+ code.putln('#endif')
+ if type.vtable_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
+ code.putln(
+ "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
+ typeobj_cname,
+ type.vtabptr_cname,
+ code.error_goto(entry.pos)))
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
if not type.scope.is_internal and not type.scope.directives.get('internal'):
- # scope.is_internal is set for types defined by
- # Cython (such as closures), the 'internal'
- # directive is set by users
- code.putln(
+ # scope.is_internal is set for types defined by
+ # Cython (such as closures), the 'internal'
+ # directive is set by users
+ code.putln(
'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % (
- Naming.module_cname,
+ Naming.module_cname,
code.intern_identifier(scope.class_name),
- typeobj_cname,
- code.error_goto(entry.pos)))
- weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
- if weakref_entry:
- if weakref_entry.type is py_object_type:
- tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
- if type.typedef_flag:
- objstruct = type.objstruct_cname
- else:
- objstruct = "struct %s" % type.objstruct_cname
- code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
- tp_weaklistoffset,
- tp_weaklistoffset,
- objstruct,
- weakref_entry.cname))
- else:
- error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
- if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
- # Unfortunately, we cannot reliably detect whether a
- # superclass defined __reduce__ at compile time, so we must
- # do so at runtime.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Generate code to initialise the typeptr of an extension
- # type defined in this module to point to its type object.
- if type.typeobj_cname:
- code.putln(
- "%s = &%s;" % (
- type.typeptr_cname, type.typeobj_cname))
-
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_entry:
+ if weakref_entry.type is py_object_type:
+ tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
+ tp_weaklistoffset,
+ tp_weaklistoffset,
+ objstruct,
+ weakref_entry.cname))
+ else:
+ error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+ if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
+ # Unfortunately, we cannot reliably detect whether a
+ # superclass defined __reduce__ at compile time, so we must
+ # do so at runtime.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
+ code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Generate code to initialise the typeptr of an extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.typeptr_cname, type.typeobj_cname))
+
def annotate(self, code):
- if self.type_init_args:
- self.type_init_args.annotate(code)
+ if self.type_init_args:
+ self.type_init_args.annotate(code)
if self.body:
self.body.annotate(code)
@@ -5115,13 +5115,13 @@ class ExprStatNode(StatNode):
def analyse_declarations(self, env):
from . import ExprNodes
- expr = self.expr
- if isinstance(expr, ExprNodes.GeneralCallNode):
- func = expr.function.as_cython_attribute()
+ expr = self.expr
+ if isinstance(expr, ExprNodes.GeneralCallNode):
+ func = expr.function.as_cython_attribute()
if func == u'declare':
- args, kwds = expr.explicit_args_kwds()
+ args, kwds = expr.explicit_args_kwds()
if len(args):
- error(expr.pos, "Variable names must be specified.")
+ error(expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
@@ -5129,20 +5129,20 @@ class ExprStatNode(StatNode):
else:
env.declare_var(var.value, type, var.pos, is_cdef=True)
self.__class__ = PassStatNode
- elif getattr(expr, 'annotation', None) is not None:
- if expr.is_name:
- # non-code variable annotation, e.g. "name: type"
- expr.declare_from_annotation(env)
- self.__class__ = PassStatNode
- elif expr.is_attribute or expr.is_subscript:
- # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
- self.__class__ = PassStatNode
+ elif getattr(expr, 'annotation', None) is not None:
+ if expr.is_name:
+ # non-code variable annotation, e.g. "name: type"
+ expr.declare_from_annotation(env)
+ self.__class__ = PassStatNode
+ elif expr.is_attribute or expr.is_subscript:
+ # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
+ self.__class__ = PassStatNode
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
- # Repeat in case of node replacement.
- self.expr.result_is_used = False # hint that .result() may safely be left empty
+ # Repeat in case of node replacement.
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
return self
def nogil_check(self, env):
@@ -5153,13 +5153,13 @@ class ExprStatNode(StatNode):
def generate_execution_code(self, code):
code.mark_pos(self.pos)
- self.expr.result_is_used = False # hint that .result() may safely be left empty
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
- result = self.expr.result()
- if not self.expr.type.is_void:
- result = "(void)(%s)" % result
- code.putln("%s;" % result)
+ result = self.expr.result()
+ if not self.expr.type.is_void:
+ result = "(void)(%s)" % result
+ code.putln("%s;" % result)
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
@@ -5873,9 +5873,9 @@ class DelStatNode(StatNode):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
- arg.generate_evaluation_code(code)
+ arg.generate_evaluation_code(code)
code.putln("delete %s;" % arg.result())
- arg.generate_disposal_code(code)
+ arg.generate_disposal_code(code)
arg.free_temps(code)
# else error reported earlier
@@ -5905,7 +5905,7 @@ class IndirectionNode(StatListNode):
def __init__(self, stats):
super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
-
+
class BreakStatNode(StatNode):
child_attrs = []
@@ -5944,12 +5944,12 @@ class ReturnStatNode(StatNode):
# value ExprNode or None
# return_type PyrexType
# in_generator return inside of generator => raise StopIteration
- # in_async_gen return inside of async generator
+ # in_async_gen return inside of async generator
child_attrs = ["value"]
is_terminator = True
in_generator = False
- in_async_gen = False
+ in_async_gen = False
# Whether we are in a parallel section
in_parallel = False
@@ -5961,8 +5961,8 @@ class ReturnStatNode(StatNode):
error(self.pos, "Return not inside a function body")
return self
if self.value:
- if self.in_async_gen:
- error(self.pos, "Return with value in async generator")
+ if self.in_async_gen:
+ error(self.pos, "Return with value in async generator")
self.value = self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos, "Return with value in void function")
@@ -5986,23 +5986,23 @@ class ReturnStatNode(StatNode):
if not self.return_type:
# error reported earlier
return
-
- value = self.value
+
+ value = self.value
if self.return_type.is_pyobject:
- code.put_xdecref(Naming.retval_cname, self.return_type)
- if value and value.is_none:
- # Use specialised default handling for "return None".
- value = None
+ code.put_xdecref(Naming.retval_cname, self.return_type)
+ if value and value.is_none:
+ # Use specialised default handling for "return None".
+ value = None
- if value:
- value.generate_evaluation_code(code)
+ if value:
+ value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
- lhs_pos=value.pos,
- rhs=value,
+ lhs_pos=value.pos,
+ rhs=value,
code=code,
have_gil=self.in_nogil_context)
value.generate_post_assignment_code(code)
@@ -6012,22 +6012,22 @@ class ReturnStatNode(StatNode):
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
- value.py_result()))
- value.generate_disposal_code(code)
+ value.py_result()))
+ value.generate_disposal_code(code)
else:
- value.make_owned_reference(code)
+ value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
- value.result_as(self.return_type)))
+ value.result_as(self.return_type)))
value.generate_post_assignment_code(code)
- value.free_temps(code)
+ value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
- if self.in_async_gen:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
- code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
+ if self.in_async_gen:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
@@ -6103,8 +6103,8 @@ class RaiseStatNode(StatNode):
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
- if self.exc_type.is_name:
- code.globalstate.use_entry_utility_code(self.exc_type.entry)
+ if self.exc_type.is_name:
+ code.globalstate.use_entry_utility_code(self.exc_type.entry)
else:
type_code = "0"
if self.exc_value:
@@ -6272,13 +6272,13 @@ class IfStatNode(StatNode):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
- if self.else_clause:
- # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
- self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
- else:
+ if self.else_clause:
+ # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
+ self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
+ else:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
- self._set_branch_hint(if_clause, if_clause.body)
+ self._set_branch_hint(if_clause, if_clause.body)
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
@@ -6287,21 +6287,21 @@ class IfStatNode(StatNode):
code.putln("}")
code.put_label(end_label)
- def _set_branch_hint(self, clause, statements_node, inverse=False):
- if not statements_node.is_terminator:
- return
- if not isinstance(statements_node, StatListNode) or not statements_node.stats:
- return
- # Anything that unconditionally raises exceptions should be considered unlikely.
- if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
- if len(statements_node.stats) > 1:
- # Allow simple statements before the 'raise', but no conditions, loops, etc.
- non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
- for node in statements_node.stats[:-1]:
- if not isinstance(node, non_branch_nodes):
- return
- clause.branch_hint = 'likely' if inverse else 'unlikely'
-
+ def _set_branch_hint(self, clause, statements_node, inverse=False):
+ if not statements_node.is_terminator:
+ return
+ if not isinstance(statements_node, StatListNode) or not statements_node.stats:
+ return
+ # Anything that unconditionally raises exceptions should be considered unlikely.
+ if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
+ if len(statements_node.stats) > 1:
+ # Allow simple statements before the 'raise', but no conditions, loops, etc.
+ non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
+ for node in statements_node.stats[:-1]:
+ if not isinstance(node, non_branch_nodes):
+ return
+ clause.branch_hint = 'likely' if inverse else 'unlikely'
+
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
@@ -6322,7 +6322,7 @@ class IfClauseNode(Node):
# body StatNode
child_attrs = ["condition", "body"]
- branch_hint = None
+ branch_hint = None
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
@@ -6335,10 +6335,10 @@ class IfClauseNode(Node):
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
- condition = self.condition.result()
- if self.branch_hint:
- condition = '%s(%s)' % (self.branch_hint, condition)
- code.putln("if (%s) {" % condition)
+ condition = self.condition.result()
+ if self.branch_hint:
+ condition = '%s(%s)' % (self.branch_hint, condition)
+ code.putln("if (%s) {" % condition)
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
@@ -6595,66 +6595,66 @@ class DictIterationNextNode(Node):
var.release(code)
-class SetIterationNextNode(Node):
- # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
- # and checking the set size for changes. Created in Optimize.py.
- child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
- 'coerced_value_var', 'value_target', 'is_set_flag']
-
- coerced_value_var = value_ref = None
-
- def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
- Node.__init__(
- self, set_obj.pos,
- set_obj=set_obj,
- expected_size=expected_size,
- pos_index_var=pos_index_var,
- value_target=value_target,
- is_set_flag=is_set_flag,
- is_temp=True,
- type=PyrexTypes.c_bint_type)
-
- def analyse_expressions(self, env):
- from . import ExprNodes
- self.set_obj = self.set_obj.analyse_types(env)
- self.expected_size = self.expected_size.analyse_types(env)
- self.pos_index_var = self.pos_index_var.analyse_types(env)
- self.value_target = self.value_target.analyse_target_types(env)
- self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
- self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
- self.is_set_flag = self.is_set_flag.analyse_types(env)
- return self
-
- def generate_function_definitions(self, env, code):
- self.set_obj.generate_function_definitions(env, code)
-
- def generate_execution_code(self, code):
- code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
- self.set_obj.generate_evaluation_code(code)
-
- value_ref = self.value_ref
- value_ref.allocate(code)
-
- result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
- code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
- result_temp,
- self.set_obj.py_result(),
- self.expected_size.result(),
- self.pos_index_var.result(),
- value_ref.result(),
- self.is_set_flag.result()
- ))
- code.putln("if (unlikely(%s == 0)) break;" % result_temp)
- code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
- code.funcstate.release_temp(result_temp)
-
- # evaluate all coercions before the assignments
- code.put_gotref(value_ref.result())
- self.coerced_value_var.generate_evaluation_code(code)
- self.value_target.generate_assignment_code(self.coerced_value_var, code)
- value_ref.release(code)
-
-
+class SetIterationNextNode(Node):
+ # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
+ # and checking the set size for changes. Created in Optimize.py.
+ child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
+ 'coerced_value_var', 'value_target', 'is_set_flag']
+
+ coerced_value_var = value_ref = None
+
+ def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
+ Node.__init__(
+ self, set_obj.pos,
+ set_obj=set_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ value_target=value_target,
+ is_set_flag=is_set_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.set_obj = self.set_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ self.is_set_flag = self.is_set_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.set_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
+ self.set_obj.generate_evaluation_code(code)
+
+ value_ref = self.value_ref
+ value_ref.allocate(code)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
+ result_temp,
+ self.set_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ value_ref.result(),
+ self.is_set_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ code.put_gotref(value_ref.result())
+ self.coerced_value_var.generate_evaluation_code(code)
+ self.value_target.generate_assignment_code(self.coerced_value_var, code)
+ value_ref.release(code)
+
+
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
@@ -6780,11 +6780,11 @@ class AsyncForStatNode(_ForInStatNode):
is_async = True
- def __init__(self, pos, **kw):
+ def __init__(self, pos, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
- kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
+ kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
@@ -6841,27 +6841,27 @@ class ForFromStatNode(LoopNode, StatNode):
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
- self.set_up_loop(env)
- target_type = self.target.type
- if not (target_type.is_pyobject or target_type.is_numeric):
- error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
-
- self.body = self.body.analyse_expressions(env)
- if self.else_clause:
- self.else_clause = self.else_clause.analyse_expressions(env)
- return self
-
- def set_up_loop(self, env):
- from . import ExprNodes
-
- target_type = self.target.type
- if target_type.is_numeric:
- loop_type = target_type
+ self.set_up_loop(env)
+ target_type = self.target.type
+ if not (target_type.is_pyobject or target_type.is_numeric):
+ error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
+
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def set_up_loop(self, env):
+ from . import ExprNodes
+
+ target_type = self.target.type
+ if target_type.is_numeric:
+ loop_type = target_type
else:
- if target_type.is_enum:
- warning(self.target.pos,
- "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
- loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
+ if target_type.is_enum:
+ warning(self.target.pos,
+ "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
+ loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
@@ -6877,7 +6877,7 @@ class ForFromStatNode(LoopNode, StatNode):
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
- if target_type.is_numeric or target_type.is_enum:
+ if target_type.is_numeric or target_type.is_enum:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
@@ -6887,7 +6887,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
- self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
+ self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
@@ -6899,23 +6899,23 @@ class ForFromStatNode(LoopNode, StatNode):
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
- incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
- else:
- step = '1'
-
+ incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
+ else:
+ step = '1'
+
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
-
- loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
-
- if from_range and not self.is_py_target:
- loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
+
+ loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
+
+ if from_range and not self.is_py_target:
+ loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
else:
loopvar_name = self.loopvar_node.result()
- if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
+ if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
@@ -6929,18 +6929,18 @@ class ForFromStatNode(LoopNode, StatNode):
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
-
- coerced_loopvar_node = self.py_loopvar_node
- if coerced_loopvar_node is None and from_range:
- coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
- if coerced_loopvar_node is not None:
- coerced_loopvar_node.generate_evaluation_code(code)
- self.target.generate_assignment_code(coerced_loopvar_node, code)
-
+
+ coerced_loopvar_node = self.py_loopvar_node
+ if coerced_loopvar_node is None and from_range:
+ coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
+ if coerced_loopvar_node is not None:
+ coerced_loopvar_node.generate_evaluation_code(code)
+ self.target.generate_assignment_code(coerced_loopvar_node, code)
+
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
-
- if not from_range and self.py_loopvar_node:
+
+ if not from_range and self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
@@ -6972,17 +6972,17 @@ class ForFromStatNode(LoopNode, StatNode):
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
-
+
code.putln("}")
-
- if not from_range and self.py_loopvar_node:
+
+ if not from_range and self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
- if from_range and not self.is_py_target:
+ if from_range and not self.is_py_target:
code.funcstate.release_temp(loopvar_name)
-
+
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
@@ -7175,7 +7175,7 @@ class TryExceptStatNode(StatNode):
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
- in_generator = False
+ in_generator = False
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
@@ -7214,8 +7214,8 @@ class TryExceptStatNode(StatNode):
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
- try_break_label = code.new_label('try_break') if old_break_label else None
- try_continue_label = code.new_label('try_continue') if old_continue_label else None
+ try_break_label = code.new_label('try_break') if old_break_label else None
+ try_continue_label = code.new_label('try_continue') if old_continue_label else None
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
@@ -7236,9 +7236,9 @@ class TryExceptStatNode(StatNode):
if can_raise:
# inject code before the try block to save away the exception state
code.globalstate.use_utility_code(reset_exception_utility_code)
- if not self.in_generator:
- save_exc.putln("__Pyx_PyThreadState_declare")
- save_exc.putln("__Pyx_PyThreadState_assign")
+ if not self.in_generator:
+ save_exc.putln("__Pyx_PyThreadState_declare")
+ save_exc.putln("__Pyx_PyThreadState_assign")
save_exc.putln("__Pyx_ExceptionSave(%s);" % (
', '.join(['&%s' % var for var in exc_save_vars])))
for var in exc_save_vars:
@@ -7252,8 +7252,8 @@ class TryExceptStatNode(StatNode):
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
- mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
- save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
+ mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
+ save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
def restore_saved_exception():
pass
@@ -7279,14 +7279,14 @@ class TryExceptStatNode(StatNode):
code.put_label(our_error_label)
for temp_name, temp_type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, temp_type)
-
- outer_except = code.funcstate.current_except
- # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
- code.funcstate.current_except = self
+
+ outer_except = code.funcstate.current_except
+ # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
+ code.funcstate.current_except = self
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
- code.funcstate.current_except = outer_except
-
+ code.funcstate.current_except = outer_except
+
if not self.has_default_clause:
code.put_goto(except_error_label)
@@ -7383,42 +7383,42 @@ class ExceptClauseNode(Node):
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
-
+
if self.pattern:
- has_non_literals = not all(
- pattern.is_literal or pattern.is_simple() and not pattern.is_temp
- for pattern in self.pattern)
-
- if has_non_literals:
- # For non-trivial exception check expressions, hide the live exception from C-API calls.
- exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
- for _ in range(3)]
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
- code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
- code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
- exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
- else:
- exc_vars = ()
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
- exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
-
+ has_non_literals = not all(
+ pattern.is_literal or pattern.is_simple() and not pattern.is_temp
+ for pattern in self.pattern)
+
+ if has_non_literals:
+ # For non-trivial exception check expressions, hide the live exception from C-API calls.
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
+ code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ else:
+ exc_vars = ()
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
- exc_tests.append(exc_test_func % pattern.py_result())
+ exc_tests.append(exc_test_func % pattern.py_result())
- match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
- code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
+ match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
-
- if has_non_literals:
- code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
- for temp in exc_vars:
- code.funcstate.release_temp(temp)
-
+
+ if has_non_literals:
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
code.putln(
"if (%s) {" %
match_flag)
@@ -7437,7 +7437,7 @@ class ExceptClauseNode(Node):
code.putln("}")
return
- exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
@@ -7447,8 +7447,8 @@ class ExceptClauseNode(Node):
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
exc_args, code.error_goto(self.pos)))
- for var in exc_vars:
- code.put_gotref(var)
+ for var in exc_vars:
+ code.put_gotref(var)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
@@ -7465,7 +7465,7 @@ class ExceptClauseNode(Node):
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
-
+
if not self.body.is_terminator:
for var in exc_vars:
# FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars),
@@ -7509,7 +7509,7 @@ class TryFinallyStatNode(StatNode):
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
- # in_generator inside of generator => must store away current exception also in return case
+ # in_generator inside of generator => must store away current exception also in return case
#
# Each of the continue, break, return and error gotos runs
# into its own deep-copy of the finally block code.
@@ -7527,7 +7527,7 @@ class TryFinallyStatNode(StatNode):
finally_except_clause = None
is_try_finally_in_nogil = False
- in_generator = False
+ in_generator = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
@@ -7599,10 +7599,10 @@ class TryFinallyStatNode(StatNode):
code.putln('}')
if preserve_error:
- code.put_label(new_error_label)
+ code.put_label(new_error_label)
code.putln('/*exception exit:*/{')
- if not self.in_generator:
- code.putln("__Pyx_PyThreadState_declare")
+ if not self.in_generator:
+ code.putln("__Pyx_PyThreadState_declare")
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
@@ -7650,47 +7650,47 @@ class TryFinallyStatNode(StatNode):
code.set_all_labels(old_labels)
return_label = code.return_label
- exc_vars = ()
-
+ exc_vars = ()
+
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
- code.putln('%s: {' % new_label)
+ code.putln('%s: {' % new_label)
ret_temp = None
- if old_label == return_label:
- # return actually raises an (uncatchable) exception in generators that we must preserve
- if self.in_generator:
- exc_vars = tuple([
- code.funcstate.allocate_temp(py_object_type, manage_ref=False)
- for _ in range(6)])
- self.put_error_catcher(code, [], exc_vars)
- if not self.finally_clause.is_terminator:
- # store away return value for later reuse
- if (self.func_return_type and
- not self.is_try_finally_in_nogil and
- not isinstance(self.finally_clause, GILExitNode)):
- ret_temp = code.funcstate.allocate_temp(
- self.func_return_type, manage_ref=False)
- code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
- if self.func_return_type.is_pyobject:
- code.putln("%s = 0;" % Naming.retval_cname)
-
- fresh_finally_clause().generate_execution_code(code)
-
- if old_label == return_label:
- if ret_temp:
- code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
+ if old_label == return_label:
+ # return actually raises an (uncatchable) exception in generators that we must preserve
+ if self.in_generator:
+ exc_vars = tuple([
+ code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+ for _ in range(6)])
+ self.put_error_catcher(code, [], exc_vars)
+ if not self.finally_clause.is_terminator:
+ # store away return value for later reuse
+ if (self.func_return_type and
+ not self.is_try_finally_in_nogil and
+ not isinstance(self.finally_clause, GILExitNode)):
+ ret_temp = code.funcstate.allocate_temp(
+ self.func_return_type, manage_ref=False)
+ code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
+ if self.func_return_type.is_pyobject:
+ code.putln("%s = 0;" % Naming.retval_cname)
+
+ fresh_finally_clause().generate_execution_code(code)
+
+ if old_label == return_label:
+ if ret_temp:
+ code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
- code.putln("%s = 0;" % ret_temp)
- code.funcstate.release_temp(ret_temp)
- if self.in_generator:
- self.put_error_uncatcher(code, exc_vars)
+ code.putln("%s = 0;" % ret_temp)
+ code.funcstate.release_temp(ret_temp)
+ if self.in_generator:
+ self.put_error_uncatcher(code, exc_vars)
for cname in exc_vars:
code.funcstate.release_temp(cname)
-
+
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
@@ -7705,7 +7705,7 @@ class TryFinallyStatNode(StatNode):
self.finally_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
- exc_lineno_cnames=None, exc_filename_cname=None):
+ exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
@@ -7714,7 +7714,7 @@ class TryFinallyStatNode(StatNode):
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign")
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
@@ -7738,7 +7738,7 @@ class TryFinallyStatNode(StatNode):
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
- def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
+ def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
@@ -7759,7 +7759,7 @@ class TryFinallyStatNode(StatNode):
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
@@ -7818,7 +7818,7 @@ class GILStatNode(NogilTryFinallyStatNode):
from .ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(body)
- if not collector.yields:
+ if not collector.yields:
return
if state == 'gil':
@@ -8235,17 +8235,17 @@ class ParallelStatNode(StatNode, ParallelNode):
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
- seen = set()
+ seen = set()
for dictitem in self.kwargs.key_value_pairs:
- if dictitem.key.value in seen:
- error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
- seen.add(dictitem.key.value)
+ if dictitem.key.value in seen:
+ error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
+ seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
- if not dictitem.value.is_none:
- self.num_threads = dictitem.value
+ if not dictitem.value.is_none:
+ self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
- if not dictitem.value.is_none:
- self.chunksize = dictitem.value
+ if not dictitem.value.is_none:
+ self.chunksize = dictitem.value
else:
pairs.append(dictitem)
@@ -8285,7 +8285,7 @@ class ParallelStatNode(StatNode, ParallelNode):
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
- if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
+ if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
@@ -8687,7 +8687,7 @@ class ParallelStatNode(StatNode, ParallelNode):
invalid_value = entry.type.invalid_value()
if invalid_value:
- init = ' = ' + entry.type.cast_code(invalid_value)
+ init = ' = ' + entry.type.cast_code(invalid_value)
else:
init = ''
# Declare the parallel private in the outer block
diff --git a/contrib/tools/cython/Cython/Compiler/Optimize.py b/contrib/tools/cython/Cython/Compiler/Optimize.py
index 3cb77efe2c..a6fdfaee9f 100644
--- a/contrib/tools/cython/Cython/Compiler/Optimize.py
+++ b/contrib/tools/cython/Cython/Compiler/Optimize.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import
-import re
+import re
import sys
import copy
import codecs
@@ -9,16 +9,16 @@ import itertools
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
-cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
+cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
- _py_string_types = (bytes, str)
+ _py_string_types = (bytes, str)
else:
_py_int_types = (int, long)
- _py_string_types = (bytes, unicode)
+ _py_string_types = (bytes, unicode)
from . import Nodes
from . import ExprNodes
@@ -29,8 +29,8 @@ from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
-from .StringEncoding import EncodedString, bytes_literal, encoded_string
-from .Errors import error, warning
+from .StringEncoding import EncodedString, bytes_literal, encoded_string
+from .Errors import error, warning
from .ParseTreeTransforms import SkipDeclarations
try:
@@ -189,61 +189,61 @@ class IterationTransform(Visitor.EnvTransform):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
- def _optimise_for_loop(self, node, iterable, reversed=False):
- annotation_type = None
- if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
- annotation = iterable.entry.annotation
- if annotation.is_subscript:
- annotation = annotation.base # container base type
- # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
- if annotation.is_name:
- if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
- annotation_type = Builtin.dict_type
- elif annotation.name == 'Dict':
- annotation_type = Builtin.dict_type
- if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
- annotation_type = Builtin.set_type
- elif annotation.name in ('Set', 'FrozenSet'):
- annotation_type = Builtin.set_type
-
- if Builtin.dict_type in (iterable.type, annotation_type):
+ def _optimise_for_loop(self, node, iterable, reversed=False):
+ annotation_type = None
+ if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
+ annotation = iterable.entry.annotation
+ if annotation.is_subscript:
+ annotation = annotation.base # container base type
+ # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
+ if annotation.is_name:
+ if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
+ annotation_type = Builtin.dict_type
+ elif annotation.name == 'Dict':
+ annotation_type = Builtin.dict_type
+ if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
+ annotation_type = Builtin.set_type
+ elif annotation.name in ('Set', 'FrozenSet'):
+ annotation_type = Builtin.set_type
+
+ if Builtin.dict_type in (iterable.type, annotation_type):
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
- node, dict_obj=iterable, method=None, keys=True, values=False)
-
- if (Builtin.set_type in (iterable.type, annotation_type) or
- Builtin.frozenset_type in (iterable.type, annotation_type)):
- if reversed:
- # CPython raises an error here: not a sequence
- return node
- return self._transform_set_iteration(node, iterable)
-
+ node, dict_obj=iterable, method=None, keys=True, values=False)
+
+ if (Builtin.set_type in (iterable.type, annotation_type) or
+ Builtin.frozenset_type in (iterable.type, annotation_type)):
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_set_iteration(node, iterable)
+
# C array (slice) iteration?
- if iterable.type.is_ptr or iterable.type.is_array:
- return self._transform_carray_iteration(node, iterable, reversed=reversed)
- if iterable.type is Builtin.bytes_type:
- return self._transform_bytes_iteration(node, iterable, reversed=reversed)
- if iterable.type is Builtin.unicode_type:
- return self._transform_unicode_iteration(node, iterable, reversed=reversed)
+ if iterable.type.is_ptr or iterable.type.is_array:
+ return self._transform_carray_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.bytes_type:
+ return self._transform_bytes_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.unicode_type:
+ return self._transform_unicode_iteration(node, iterable, reversed=reversed)
# the rest is based on function calls
- if not isinstance(iterable, ExprNodes.SimpleCallNode):
+ if not isinstance(iterable, ExprNodes.SimpleCallNode):
return node
- if iterable.args is None:
- arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
+ if iterable.args is None:
+ arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
else:
- arg_count = len(iterable.args)
- if arg_count and iterable.self is not None:
+ arg_count = len(iterable.args)
+ if arg_count and iterable.self is not None:
arg_count -= 1
- function = iterable.function
+ function = iterable.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
- base_obj = iterable.self or function.obj
+ base_obj = iterable.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
@@ -271,35 +271,35 @@ class IterationTransform(Visitor.EnvTransform):
node, base_obj, method, keys, values)
# enumerate/reversed ?
- if iterable.self is None and function.is_name and \
+ if iterable.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_enumerate_iteration(node, iterable)
+ return self._transform_enumerate_iteration(node, iterable)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_reversed_iteration(node, iterable)
+ return self._transform_reversed_iteration(node, iterable)
# range() iteration?
if Options.convert_range and 1 <= arg_count <= 3 and (
- iterable.self is None and
- function.is_name and function.name in ('range', 'xrange') and
- function.entry and function.entry.is_builtin):
- if node.target.type.is_int or node.target.type.is_enum:
- return self._transform_range_iteration(node, iterable, reversed=reversed)
- if node.target.type.is_pyobject:
- # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
- for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
- if isinstance(arg, ExprNodes.IntNode):
- if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
- continue
- break
- else:
- return self._transform_range_iteration(node, iterable, reversed=reversed)
+ iterable.self is None and
+ function.is_name and function.name in ('range', 'xrange') and
+ function.entry and function.entry.is_builtin):
+ if node.target.type.is_int or node.target.type.is_enum:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
+ if node.target.type.is_pyobject:
+ # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
+ for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
+ if isinstance(arg, ExprNodes.IntNode):
+ if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
+ continue
+ break
+ else:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
return node
@@ -804,7 +804,7 @@ class IterationTransform(Visitor.EnvTransform):
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
- for_node.set_up_loop(self.current_env())
+ for_node.set_up_loop(self.current_env())
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
@@ -929,7 +929,7 @@ class IterationTransform(Visitor.EnvTransform):
method_node = ExprNodes.StringNode(
dict_obj.pos, is_identifier=True, value=method)
dict_obj = dict_obj.as_none_safe_node(
- "'NoneType' object has no attribute '%{0}s'".format('.30' if len(method) <= 30 else ''),
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(method) <= 30 else ''),
error = "PyExc_AttributeError",
format_args = [method])
else:
@@ -983,86 +983,86 @@ class IterationTransform(Visitor.EnvTransform):
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
- PySet_Iterator_func_type = PyrexTypes.CFuncType(
- PyrexTypes.py_object_type, [
- PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
- PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
- PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
- PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
- ])
-
- def _transform_set_iteration(self, node, set_obj):
- temps = []
- temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
- temps.append(temp)
- set_temp = temp.ref(set_obj.pos)
- temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
- temps.append(temp)
- pos_temp = temp.ref(node.pos)
-
- if isinstance(node.body, Nodes.StatListNode):
- body = node.body
- else:
- body = Nodes.StatListNode(pos = node.body.pos,
- stats = [node.body])
-
- # keep original length to guard against set modification
- set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
- temps.append(set_len_temp)
- set_len_temp_addr = ExprNodes.AmpersandNode(
- node.pos, operand=set_len_temp.ref(set_obj.pos),
- type=PyrexTypes.c_ptr_type(set_len_temp.type))
- temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
- temps.append(temp)
- is_set_temp = temp.ref(node.pos)
- is_set_temp_addr = ExprNodes.AmpersandNode(
- node.pos, operand=is_set_temp,
- type=PyrexTypes.c_ptr_type(temp.type))
-
- value_target = node.target
- iter_next_node = Nodes.SetIterationNextNode(
- set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
- iter_next_node = iter_next_node.analyse_expressions(self.current_env())
- body.stats[0:0] = [iter_next_node]
-
- def flag_node(value):
- value = value and 1 or 0
- return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
-
- result_code = [
- Nodes.SingleAssignmentNode(
- node.pos,
- lhs=pos_temp,
- rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
- Nodes.SingleAssignmentNode(
- set_obj.pos,
- lhs=set_temp,
- rhs=ExprNodes.PythonCapiCallNode(
- set_obj.pos,
- "__Pyx_set_iterator",
- self.PySet_Iterator_func_type,
- utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
- args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
- set_len_temp_addr, is_set_temp_addr,
- ],
- is_temp=True,
- )),
- Nodes.WhileStatNode(
- node.pos,
- condition=None,
- body=body,
- else_clause=node.else_clause,
- )
- ]
-
- return UtilNodes.TempsBlockNode(
- node.pos, temps=temps,
- body=Nodes.StatListNode(
- node.pos,
- stats = result_code
- ))
-
-
+ PySet_Iterator_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
+ ])
+
+ def _transform_set_iteration(self, node, set_obj):
+ temps = []
+ temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ temps.append(temp)
+ set_temp = temp.ref(set_obj.pos)
+ temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(temp)
+ pos_temp = temp.ref(node.pos)
+
+ if isinstance(node.body, Nodes.StatListNode):
+ body = node.body
+ else:
+ body = Nodes.StatListNode(pos = node.body.pos,
+ stats = [node.body])
+
+ # keep original length to guard against set modification
+ set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(set_len_temp)
+ set_len_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=set_len_temp.ref(set_obj.pos),
+ type=PyrexTypes.c_ptr_type(set_len_temp.type))
+ temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ temps.append(temp)
+ is_set_temp = temp.ref(node.pos)
+ is_set_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=is_set_temp,
+ type=PyrexTypes.c_ptr_type(temp.type))
+
+ value_target = node.target
+ iter_next_node = Nodes.SetIterationNextNode(
+ set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
+ iter_next_node = iter_next_node.analyse_expressions(self.current_env())
+ body.stats[0:0] = [iter_next_node]
+
+ def flag_node(value):
+ value = value and 1 or 0
+ return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
+
+ result_code = [
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=pos_temp,
+ rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
+ Nodes.SingleAssignmentNode(
+ set_obj.pos,
+ lhs=set_temp,
+ rhs=ExprNodes.PythonCapiCallNode(
+ set_obj.pos,
+ "__Pyx_set_iterator",
+ self.PySet_Iterator_func_type,
+ utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
+ args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
+ set_len_temp_addr, is_set_temp_addr,
+ ],
+ is_temp=True,
+ )),
+ Nodes.WhileStatNode(
+ node.pos,
+ condition=None,
+ body=body,
+ else_clause=node.else_clause,
+ )
+ ]
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=temps,
+ body=Nodes.StatListNode(
+ node.pos,
+ stats = result_code
+ ))
+
+
class SwitchTransform(Visitor.EnvTransform):
"""
This transformation tries to turn long if statements into C switch statements.
@@ -2035,11 +2035,11 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
"""
### cleanup to avoid redundant coercions to/from Python types
- def visit_PyTypeTestNode(self, node):
+ def visit_PyTypeTestNode(self, node):
"""Flatten redundant type checks after tree changes.
"""
self.visitchildren(node)
- return node.reanalyse()
+ return node.reanalyse()
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
@@ -2054,18 +2054,18 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
def visit_ExprStatNode(self, node):
"""
- Drop dead code and useless coercions.
+ Drop dead code and useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
- expr = node.expr
- if expr is None or expr.is_none or expr.is_literal:
- # Expression was removed or is dead code => remove ExprStatNode as well.
- return None
- if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
- # Ignore dead references to local variables etc.
- return None
+ expr = node.expr
+ if expr is None or expr.is_none or expr.is_literal:
+ # Expression was removed or is dead code => remove ExprStatNode as well.
+ return None
+ if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
+ # Ignore dead references to local variables etc.
+ return None
return node
def visit_CoerceToBooleanNode(self, node):
@@ -2283,8 +2283,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
attribute=attr_name,
is_called=True).analyse_as_type_attribute(self.current_env())
if method is None:
- return self._optimise_generic_builtin_method_call(
- node, attr_name, function, arg_list, is_unbound_method)
+ return self._optimise_generic_builtin_method_call(
+ node, attr_name, function, arg_list, is_unbound_method)
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
@@ -2300,62 +2300,62 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
### builtin types
- def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
- """
- Try to inject an unbound method call for a call to a method of a known builtin type.
- This enables caching the underlying C function of the method at runtime.
- """
- arg_count = len(arg_list)
- if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
- return node
- if not function.obj.type.is_builtin_type:
- return node
- if function.obj.type.name in ('basestring', 'type'):
- # these allow different actual types => unsafe
- return node
- return ExprNodes.CachedBuiltinMethodCallNode(
- node, function.obj, attr_name, arg_list)
-
- PyObject_Unicode_func_type = PyrexTypes.CFuncType(
- Builtin.unicode_type, [
- PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
- ])
-
- def _handle_simple_function_unicode(self, node, function, pos_args):
- """Optimise single argument calls to unicode().
- """
- if len(pos_args) != 1:
- if len(pos_args) == 0:
- return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
- return node
- arg = pos_args[0]
- if arg.type is Builtin.unicode_type:
- if not arg.may_be_none():
- return arg
- cname = "__Pyx_PyUnicode_Unicode"
- utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
- else:
- cname = "__Pyx_PyObject_Unicode"
- utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
- return ExprNodes.PythonCapiCallNode(
- node.pos, cname, self.PyObject_Unicode_func_type,
- args=pos_args,
- is_temp=node.is_temp,
- utility_code=utility_code,
- py_name="unicode")
-
- def visit_FormattedValueNode(self, node):
- """Simplify or avoid plain string formatting of a unicode value.
- This seems misplaced here, but plain unicode formatting is essentially
- a call to the unicode() builtin, which is optimised right above.
- """
- self.visitchildren(node)
- if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
- if not node.conversion_char or node.conversion_char == 's':
- # value is definitely a unicode string and we don't format it any special
- return self._handle_simple_function_unicode(node, None, [node.value])
- return node
-
+ def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
+ """
+ Try to inject an unbound method call for a call to a method of a known builtin type.
+ This enables caching the underlying C function of the method at runtime.
+ """
+ arg_count = len(arg_list)
+ if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
+ return node
+ if not function.obj.type.is_builtin_type:
+ return node
+ if function.obj.type.name in ('basestring', 'type'):
+ # these allow different actual types => unsafe
+ return node
+ return ExprNodes.CachedBuiltinMethodCallNode(
+ node, function.obj, attr_name, arg_list)
+
+ PyObject_Unicode_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_unicode(self, node, function, pos_args):
+ """Optimise single argument calls to unicode().
+ """
+ if len(pos_args) != 1:
+ if len(pos_args) == 0:
+ return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.unicode_type:
+ if not arg.may_be_none():
+ return arg
+ cname = "__Pyx_PyUnicode_Unicode"
+ utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
+ else:
+ cname = "__Pyx_PyObject_Unicode"
+ utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, cname, self.PyObject_Unicode_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ utility_code=utility_code,
+ py_name="unicode")
+
+ def visit_FormattedValueNode(self, node):
+ """Simplify or avoid plain string formatting of a unicode value.
+ This seems misplaced here, but plain unicode formatting is essentially
+ a call to the unicode() builtin, which is optimised right above.
+ """
+ self.visitchildren(node)
+ if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
+ if not node.conversion_char or node.conversion_char == 's':
+ # value is definitely a unicode string and we don't format it any special
+ return self._handle_simple_function_unicode(node, None, [node.value])
+ return node
+
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
@@ -2398,7 +2398,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
def _handle_simple_function_tuple(self, node, function, pos_args):
"""Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
"""
- if len(pos_args) != 1 or not node.is_temp:
+ if len(pos_args) != 1 or not node.is_temp:
return node
arg = pos_args[0]
if arg.type is Builtin.tuple_type and not arg.may_be_none():
@@ -2411,7 +2411,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
args=pos_args, is_temp=node.is_temp)
else:
- return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type)
+ return ExprNodes.AsTupleNode(node.pos, arg=arg, type=Builtin.tuple_type)
PySet_New_func_type = PyrexTypes.CFuncType(
Builtin.set_type, [
@@ -2577,7 +2577,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
_map_to_capi_len_function = {
Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type: "PyBytes_GET_SIZE",
- Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.list_type: "PyList_GET_SIZE",
Builtin.tuple_type: "PyTuple_GET_SIZE",
Builtin.set_type: "PySet_GET_SIZE",
@@ -2609,14 +2609,14 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
args = [arg],
is_temp = node.is_temp)
- elif arg.type.is_memoryviewslice:
- func_type = PyrexTypes.CFuncType(
- PyrexTypes.c_size_t_type, [
- PyrexTypes.CFuncTypeArg("memoryviewslice", arg.type, None)
- ], nogil=True)
- new_node = ExprNodes.PythonCapiCallNode(
- node.pos, "__Pyx_MemoryView_Len", func_type,
- args=[arg], is_temp=node.is_temp)
+ elif arg.type.is_memoryviewslice:
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_size_t_type, [
+ PyrexTypes.CFuncTypeArg("memoryviewslice", arg.type, None)
+ ], nogil=True)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_MemoryView_Len", func_type,
+ args=[arg], is_temp=node.is_temp)
elif arg.type.is_pyobject:
cfunc_name = self._map_to_capi_len_function(arg.type)
if cfunc_name is None:
@@ -2630,7 +2630,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
"object of type 'NoneType' has no len()")
new_node = ExprNodes.PythonCapiCallNode(
node.pos, cfunc_name, self.PyObject_Size_func_type,
- args=[arg], is_temp=node.is_temp)
+ args=[arg], is_temp=node.is_temp)
elif arg.type.is_unicode_char:
return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
type=node.type)
@@ -2811,7 +2811,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
- ext_type, [
+ ext_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
@@ -2824,7 +2824,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
- may_return_none=False,
+ may_return_none=False,
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
@@ -2872,69 +2872,69 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
utility_code=load_c_utility('append')
)
- def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
- """Replace list.extend([...]) for short sequence literals values by sequential appends
- to avoid creating an intermediate sequence argument.
- """
- if len(args) != 2:
- return node
- obj, value = args
- if not value.is_sequence_constructor:
- return node
- items = list(value.args)
- if value.mult_factor is not None or len(items) > 8:
- # Appending wins for short sequences but slows down when multiple resize operations are needed.
- # This seems to be a good enough limit that avoids repeated resizing.
- if False and isinstance(value, ExprNodes.ListNode):
- # One would expect that tuples are more efficient here, but benchmarking with
- # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
- # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
- # which is probably tuned more towards lists than tuples (and rightly so).
- tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
- Visitor.recursively_replace_node(node, args[1], tuple_node)
- return node
- wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
- if not items:
- # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
- wrapped_obj.result_is_used = node.result_is_used
- return wrapped_obj
- cloned_obj = obj = wrapped_obj
- if len(items) > 1 and not obj.is_simple():
- cloned_obj = UtilNodes.LetRefNode(obj)
- # Use ListComp_Append() for all but the last item and finish with PyList_Append()
- # to shrink the list storage size at the very end if necessary.
- temps = []
- arg = items[-1]
- if not arg.is_simple():
- arg = UtilNodes.LetRefNode(arg)
- temps.append(arg)
- new_node = ExprNodes.PythonCapiCallNode(
- node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
- args=[cloned_obj, arg],
- is_temp=True,
- utility_code=load_c_utility("ListAppend"))
- for arg in items[-2::-1]:
- if not arg.is_simple():
- arg = UtilNodes.LetRefNode(arg)
- temps.append(arg)
- new_node = ExprNodes.binop_node(
- node.pos, '|',
- ExprNodes.PythonCapiCallNode(
- node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
- args=[cloned_obj, arg], py_name="extend",
- is_temp=True,
- utility_code=load_c_utility("ListCompAppend")),
- new_node,
- type=PyrexTypes.c_returncode_type,
- )
- new_node.result_is_used = node.result_is_used
- if cloned_obj is not obj:
- temps.append(cloned_obj)
- for temp in temps:
- new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
- new_node.result_is_used = node.result_is_used
- return new_node
-
+ def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
+ """Replace list.extend([...]) for short sequence literals values by sequential appends
+ to avoid creating an intermediate sequence argument.
+ """
+ if len(args) != 2:
+ return node
+ obj, value = args
+ if not value.is_sequence_constructor:
+ return node
+ items = list(value.args)
+ if value.mult_factor is not None or len(items) > 8:
+ # Appending wins for short sequences but slows down when multiple resize operations are needed.
+ # This seems to be a good enough limit that avoids repeated resizing.
+ if False and isinstance(value, ExprNodes.ListNode):
+ # One would expect that tuples are more efficient here, but benchmarking with
+ # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
+ # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
+ # which is probably tuned more towards lists than tuples (and rightly so).
+ tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
+ Visitor.recursively_replace_node(node, args[1], tuple_node)
+ return node
+ wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
+ if not items:
+ # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
+ wrapped_obj.result_is_used = node.result_is_used
+ return wrapped_obj
+ cloned_obj = obj = wrapped_obj
+ if len(items) > 1 and not obj.is_simple():
+ cloned_obj = UtilNodes.LetRefNode(obj)
+ # Use ListComp_Append() for all but the last item and finish with PyList_Append()
+ # to shrink the list storage size at the very end if necessary.
+ temps = []
+ arg = items[-1]
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg],
+ is_temp=True,
+ utility_code=load_c_utility("ListAppend"))
+ for arg in items[-2::-1]:
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.binop_node(
+ node.pos, '|',
+ ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg], py_name="extend",
+ is_temp=True,
+ utility_code=load_c_utility("ListCompAppend")),
+ new_node,
+ type=PyrexTypes.c_returncode_type,
+ )
+ new_node.result_is_used = node.result_is_used
+ if cloned_obj is not obj:
+ temps.append(cloned_obj)
+ for temp in temps:
+ new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
+ new_node.result_is_used = node.result_is_used
+ return new_node
+
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
@@ -3010,7 +3010,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
if is_list:
type_name = 'List'
obj = obj.as_none_safe_node(
- "'NoneType' object has no attribute '%.30s'",
+ "'NoneType' object has no attribute '%.30s'",
error="PyExc_AttributeError",
format_args=['pop'])
else:
@@ -3140,29 +3140,29 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
- PyDict_Pop_func_type = PyrexTypes.CFuncType(
- PyrexTypes.py_object_type, [
- PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
- PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
- PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
- ])
-
- def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
- """Replace dict.pop() by a call to _PyDict_Pop().
- """
- if len(args) == 2:
- args.append(ExprNodes.NullNode(node.pos))
- elif len(args) != 3:
- self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
- return node
-
- return self._substitute_method_call(
- node, function,
- "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
- 'pop', is_unbound_method, args,
- may_return_none=True,
- utility_code=load_c_utility('py_dict_pop'))
-
+ PyDict_Pop_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
+ """Replace dict.pop() by a call to _PyDict_Pop().
+ """
+ if len(args) == 2:
+ args.append(ExprNodes.NullNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
+ return node
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
+ 'pop', is_unbound_method, args,
+ may_return_none=True,
+ utility_code=load_c_utility('py_dict_pop'))
+
Pyx_BinopInt_func_types = dict(
((ctype, ret_type), PyrexTypes.CFuncType(
ret_type, [
@@ -3635,7 +3635,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
- _special_encodings = ['UTF8', 'UTF16', 'UTF-16LE', 'UTF-16BE', 'Latin1', 'ASCII',
+ _special_encodings = ['UTF8', 'UTF16', 'UTF-16LE', 'UTF-16BE', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
@@ -3677,7 +3677,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
if encoding and error_handling == 'strict':
# try to find a specific encoder function
codec_name = self._find_special_codec_name(encoding)
- if codec_name is not None and '-' not in codec_name:
+ if codec_name is not None and '-' not in codec_name:
encode_function = "PyUnicode_As%sString" % codec_name
return self._substitute_method_call(
node, function, encode_function,
@@ -3747,7 +3747,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
format_args=['decode', string_type.name])
else:
string_node = string_node.as_none_safe_node(
- "'NoneType' object has no attribute '%.30s'",
+ "'NoneType' object has no attribute '%.30s'",
error="PyExc_AttributeError",
format_args=['decode'])
elif not string_type.is_string and not string_type.is_cpp_string:
@@ -3771,12 +3771,12 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
if encoding is not None:
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
- if codec_name in ('UTF16', 'UTF-16LE', 'UTF-16BE'):
- codec_cname = "__Pyx_PyUnicode_Decode%s" % codec_name.replace('-', '')
- else:
- codec_cname = "PyUnicode_Decode%s" % codec_name
+ if codec_name in ('UTF16', 'UTF-16LE', 'UTF-16BE'):
+ codec_cname = "__Pyx_PyUnicode_Decode%s" % codec_name.replace('-', '')
+ else:
+ codec_cname = "PyUnicode_Decode%s" % codec_name
decode_function = ExprNodes.RawCNameExprNode(
- node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname=codec_cname)
+ node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type, cname=codec_cname)
encoding_node = ExprNodes.NullNode(node.pos)
else:
decode_function = ExprNodes.NullNode(node.pos)
@@ -3936,8 +3936,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
with_none_check=True):
args = list(args)
- if with_none_check and args:
- args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
+ if with_none_check and args:
+ args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
if is_temp is None:
is_temp = node.is_temp
return ExprNodes.PythonCapiCallNode(
@@ -3949,20 +3949,20 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
result_is_used = node.result_is_used,
)
- def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
- if self_arg.is_literal:
- return self_arg
- if is_unbound_method:
- self_arg = self_arg.as_none_safe_node(
- "descriptor '%s' requires a '%s' object but received a 'NoneType'",
- format_args=[attr_name, self_arg.type.name])
- else:
- self_arg = self_arg.as_none_safe_node(
- "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
- error="PyExc_AttributeError",
- format_args=[attr_name])
- return self_arg
-
+ def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
+ if self_arg.is_literal:
+ return self_arg
+ if is_unbound_method:
+ self_arg = self_arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[attr_name, self_arg.type.name])
+ else:
+ self_arg = self_arg.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
+ error="PyExc_AttributeError",
+ format_args=[attr_name])
+ return self_arg
+
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
@@ -4231,48 +4231,48 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
- if node.operand1.is_string_literal:
- return self._multiply_string(node, node.operand1, node.operand2)
- elif node.operand2.is_string_literal:
- return self._multiply_string(node, node.operand2, node.operand1)
+ if node.operand1.is_string_literal:
+ return self._multiply_string(node, node.operand1, node.operand2)
+ elif node.operand2.is_string_literal:
+ return self._multiply_string(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
- def _multiply_string(self, node, string_node, multiplier_node):
- multiplier = multiplier_node.constant_result
- if not isinstance(multiplier, _py_int_types):
- return node
- if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
- return node
- if len(node.constant_result) > 256:
- # Too long for static creation, leave it to runtime. (-> arbitrary limit)
- return node
-
- build_string = encoded_string
- if isinstance(string_node, ExprNodes.BytesNode):
- build_string = bytes_literal
- elif isinstance(string_node, ExprNodes.StringNode):
- if string_node.unicode_value is not None:
- string_node.unicode_value = encoded_string(
- string_node.unicode_value * multiplier,
- string_node.unicode_value.encoding)
+ def _multiply_string(self, node, string_node, multiplier_node):
+ multiplier = multiplier_node.constant_result
+ if not isinstance(multiplier, _py_int_types):
+ return node
+ if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
+ return node
+ if len(node.constant_result) > 256:
+ # Too long for static creation, leave it to runtime. (-> arbitrary limit)
+ return node
+
+ build_string = encoded_string
+ if isinstance(string_node, ExprNodes.BytesNode):
+ build_string = bytes_literal
+ elif isinstance(string_node, ExprNodes.StringNode):
+ if string_node.unicode_value is not None:
+ string_node.unicode_value = encoded_string(
+ string_node.unicode_value * multiplier,
+ string_node.unicode_value.encoding)
build_string = encoded_string if string_node.value.is_unicode else bytes_literal
- elif isinstance(string_node, ExprNodes.UnicodeNode):
- if string_node.bytes_value is not None:
- string_node.bytes_value = bytes_literal(
- string_node.bytes_value * multiplier,
- string_node.bytes_value.encoding)
- else:
- assert False, "unknown string node type: %s" % type(string_node)
+ elif isinstance(string_node, ExprNodes.UnicodeNode):
+ if string_node.bytes_value is not None:
+ string_node.bytes_value = bytes_literal(
+ string_node.bytes_value * multiplier,
+ string_node.bytes_value.encoding)
+ else:
+ assert False, "unknown string node type: %s" % type(string_node)
string_node.value = build_string(
- string_node.value * multiplier,
- string_node.value.encoding)
+ string_node.value * multiplier,
+ string_node.value.encoding)
# follow constant-folding and use unicode_value in preference
if isinstance(string_node, ExprNodes.StringNode) and string_node.unicode_value is not None:
string_node.constant_result = string_node.unicode_value
else:
string_node.constant_result = string_node.value
- return string_node
-
+ return string_node
+
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
@@ -4292,57 +4292,57 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
sequence_node.mult_factor = factor
return sequence_node
- def visit_ModNode(self, node):
- self.visitchildren(node)
- if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
- if not node.operand2.mult_factor:
- fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
- if fstring is not None:
- return fstring
- return self.visit_BinopNode(node)
-
- _parse_string_format_regex = (
+ def visit_ModNode(self, node):
+ self.visitchildren(node)
+ if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
+ if not node.operand2.mult_factor:
+ fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
+ if fstring is not None:
+ return fstring
+ return self.visit_BinopNode(node)
+
+ _parse_string_format_regex = (
u'(%(?:' # %...
u'(?:[-0-9]+|[ ])?' # width (optional) or space prefix fill character (optional)
u'(?:[.][0-9]+)?' # precision (optional)
u')?.)' # format type (or something different for unsupported formats)
- )
-
- def _build_fstring(self, pos, ustring, format_args):
- # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
- args = iter(format_args)
- substrings = []
- can_be_optimised = True
- for s in re.split(self._parse_string_format_regex, ustring):
- if not s:
- continue
- if s == u'%%':
- substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
- continue
- if s[0] != u'%':
- if s[-1] == u'%':
- warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
- can_be_optimised = False
- substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
- continue
- format_type = s[-1]
- try:
- arg = next(args)
- except StopIteration:
- warning(pos, "Too few arguments for format placeholders", level=1)
- can_be_optimised = False
- break
+ )
+
+ def _build_fstring(self, pos, ustring, format_args):
+ # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
+ args = iter(format_args)
+ substrings = []
+ can_be_optimised = True
+ for s in re.split(self._parse_string_format_regex, ustring):
+ if not s:
+ continue
+ if s == u'%%':
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
+ continue
+ if s[0] != u'%':
+ if s[-1] == u'%':
+ warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
+ can_be_optimised = False
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
+ continue
+ format_type = s[-1]
+ try:
+ arg = next(args)
+ except StopIteration:
+ warning(pos, "Too few arguments for format placeholders", level=1)
+ can_be_optimised = False
+ break
if arg.is_starred:
can_be_optimised = False
break
if format_type in u'asrfdoxX':
- format_spec = s[1:]
+ format_spec = s[1:]
conversion_char = None
- if format_type in u'doxX' and u'.' in format_spec:
- # Precision is not allowed for integers in format(), but ok in %-formatting.
- can_be_optimised = False
+ if format_type in u'doxX' and u'.' in format_spec:
+ # Precision is not allowed for integers in format(), but ok in %-formatting.
+ can_be_optimised = False
elif format_type in u'ars':
- format_spec = format_spec[:-1]
+ format_spec = format_spec[:-1]
conversion_char = format_type
if format_spec.startswith('0'):
format_spec = '>' + format_spec[1:] # right-alignment '%05s' spells '{:>5}'
@@ -4353,49 +4353,49 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
if format_spec.startswith('-'):
format_spec = '<' + format_spec[1:] # left-alignment '%-5s' spells '{:<5}'
- substrings.append(ExprNodes.FormattedValueNode(
- arg.pos, value=arg,
+ substrings.append(ExprNodes.FormattedValueNode(
+ arg.pos, value=arg,
conversion_char=conversion_char,
- format_spec=ExprNodes.UnicodeNode(
- pos, value=EncodedString(format_spec), constant_result=format_spec)
- if format_spec else None,
- ))
- else:
- # keep it simple for now ...
- can_be_optimised = False
+ format_spec=ExprNodes.UnicodeNode(
+ pos, value=EncodedString(format_spec), constant_result=format_spec)
+ if format_spec else None,
+ ))
+ else:
+ # keep it simple for now ...
+ can_be_optimised = False
break
-
- if not can_be_optimised:
- # Print all warnings we can find before finally giving up here.
- return None
-
- try:
- next(args)
- except StopIteration: pass
- else:
- warning(pos, "Too many arguments for format placeholders", level=1)
- return None
-
- node = ExprNodes.JoinedStrNode(pos, values=substrings)
- return self.visit_JoinedStrNode(node)
-
+
+ if not can_be_optimised:
+ # Print all warnings we can find before finally giving up here.
+ return None
+
+ try:
+ next(args)
+ except StopIteration: pass
+ else:
+ warning(pos, "Too many arguments for format placeholders", level=1)
+ return None
+
+ node = ExprNodes.JoinedStrNode(pos, values=substrings)
+ return self.visit_JoinedStrNode(node)
+
def visit_FormattedValueNode(self, node):
self.visitchildren(node)
- conversion_char = node.conversion_char or 's'
+ conversion_char = node.conversion_char or 's'
if isinstance(node.format_spec, ExprNodes.UnicodeNode) and not node.format_spec.value:
node.format_spec = None
- if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode):
- value = EncodedString(node.value.value)
- if value.isdigit():
- return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
- if node.format_spec is None and conversion_char == 's':
- value = None
- if isinstance(node.value, ExprNodes.UnicodeNode):
- value = node.value.value
- elif isinstance(node.value, ExprNodes.StringNode):
- value = node.value.unicode_value
- if value is not None:
- return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
+ if node.format_spec is None and isinstance(node.value, ExprNodes.IntNode):
+ value = EncodedString(node.value.value)
+ if value.isdigit():
+ return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
+ if node.format_spec is None and conversion_char == 's':
+ value = None
+ if isinstance(node.value, ExprNodes.UnicodeNode):
+ value = node.value.value
+ elif isinstance(node.value, ExprNodes.StringNode):
+ value = node.value.unicode_value
+ if value is not None:
+ return ExprNodes.UnicodeNode(node.value.pos, value=value, constant_result=value)
return node
def visit_JoinedStrNode(self, node):
@@ -4413,8 +4413,8 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
substrings = list(substrings)
unode = substrings[0]
if len(substrings) > 1:
- value = EncodedString(u''.join(value.value for value in substrings))
- unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value)
+ value = EncodedString(u''.join(value.value for value in substrings))
+ unode = ExprNodes.UnicodeNode(unode.pos, value=value, constant_result=value)
# ignore empty Unicode strings
if unode.value:
values.append(unode)
@@ -4422,8 +4422,8 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
values.extend(substrings)
if not values:
- value = EncodedString('')
- node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value)
+ value = EncodedString('')
+ node = ExprNodes.UnicodeNode(node.pos, value=value, constant_result=value)
elif len(values) == 1:
node = values[0]
elif len(values) == 2:
@@ -4713,7 +4713,7 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
visit_Node = Visitor.VisitorTransform.recurse_to_children
-class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
+class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
@@ -4722,11 +4722,11 @@ class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
- - eliminate useless string formatting steps
+ - eliminate useless string formatting steps
- replace Python function calls that look like method calls by a faster PyMethodCallNode
"""
- in_loop = False
-
+ in_loop = False
+
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
@@ -4753,10 +4753,10 @@ class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
- elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
- "optimize.unpack_method_calls_in_pyinit"
- if not self.in_loop and self.current_env().is_module_scope
- else "optimize.unpack_method_calls")):
+ elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
+ "optimize.unpack_method_calls_in_pyinit"
+ if not self.in_loop and self.current_env().is_module_scope
+ else "optimize.unpack_method_calls")):
# optimise simple Python methods calls
if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and len(node.arg_tuple.args) > 1)):
@@ -4787,11 +4787,11 @@ class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
node, function=function, arg_tuple=node.arg_tuple, type=node.type))
return node
- def visit_NumPyMethodCallNode(self, node):
- # Exclude from replacement above.
- self.visitchildren(node)
- return node
-
+ def visit_NumPyMethodCallNode(self, node):
+ # Exclude from replacement above.
+ self.visitchildren(node)
+ return node
+
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
@@ -4812,16 +4812,16 @@ class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
return node.arg
return node
- def visit_LoopNode(self, node):
- """Remember when we enter a loop as some expensive optimisations might still be worth it there.
- """
- old_val = self.in_loop
- self.in_loop = True
- self.visitchildren(node)
- self.in_loop = old_val
- return node
-
-
+ def visit_LoopNode(self, node):
+ """Remember when we enter a loop as some expensive optimisations might still be worth it there.
+ """
+ old_val = self.in_loop
+ self.in_loop = True
+ self.visitchildren(node)
+ self.in_loop = old_val
+ return node
+
+
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
diff --git a/contrib/tools/cython/Cython/Compiler/Options.py b/contrib/tools/cython/Cython/Compiler/Options.py
index b3ffbcd927..d29fd6b6c8 100644
--- a/contrib/tools/cython/Cython/Compiler/Options.py
+++ b/contrib/tools/cython/Cython/Compiler/Options.py
@@ -9,10 +9,10 @@ class ShouldBeFromDirective(object):
known_directives = []
- def __init__(self, options_name, directive_name=None, disallow=False):
+ def __init__(self, options_name, directive_name=None, disallow=False):
self.options_name = options_name
self.directive_name = directive_name or options_name
- self.disallow = disallow
+ self.disallow = disallow
self.known_directives.append(self)
def __nonzero__(self):
@@ -150,10 +150,10 @@ buffer_max_dims = 8
#: Number of function closure instances to keep in a freelist (0: no freelists)
closure_freelist_size = 8
-# Arcadia specific
-source_root = None
-
+# Arcadia specific
+source_root = None
+
def get_directive_defaults():
# To add an item to this list, all accesses should be changed to use the new
# directive, and the global option itself should be set to an instance of
@@ -178,7 +178,7 @@ _directive_defaults = {
'initializedcheck' : True,
'embedsignature' : False,
'auto_cpdef': False,
- 'auto_pickle': None,
+ 'auto_pickle': None,
'cdivision': False, # was True before 0.12
'cdivision_warnings': False,
'c_api_binop_methods': True,
@@ -202,15 +202,15 @@ _directive_defaults = {
'language_level': None,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
- 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079).
- 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax.
+ 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079).
+ 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax.
'c_string_type': 'bytes',
'c_string_encoding': '',
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
- 'unraisable_tracebacks': True,
+ 'unraisable_tracebacks': True,
'old_style_globals': False,
- 'np_pythran': False,
- 'fast_gil': False,
+ 'np_pythran': False,
+ 'fast_gil': False,
# set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
@@ -301,7 +301,7 @@ def normalise_encoding_name(option_name, encoding):
# Override types possibilities above, if needed
directive_types = {
'language_level': str, # values can be None/2/3/'3str', where None == 2+warning
- 'auto_pickle': bool,
+ 'auto_pickle': bool,
'locals': dict,
'final' : bool, # final cdef classes and methods
'nogil' : bool,
@@ -329,15 +329,15 @@ for key, val in _directive_defaults.items():
directive_scopes = { # defaults to available everywhere
# 'module', 'function', 'class', 'with statement'
- 'auto_pickle': ('module', 'cclass'),
+ 'auto_pickle': ('module', 'cclass'),
'final' : ('cclass', 'function'),
'nogil' : ('function', 'with statement'),
'inline' : ('function',),
'cfunc' : ('function', 'with statement'),
'ccall' : ('function', 'with statement'),
- 'returns' : ('function',),
- 'exceptval' : ('function',),
- 'locals' : ('function',),
+ 'returns' : ('function',),
+ 'exceptval' : ('function',),
+ 'locals' : ('function',),
'staticmethod' : ('function',), # FIXME: analysis currently lacks more specific function scope
'no_gc_clear' : ('cclass',),
'no_gc' : ('cclass',),
@@ -360,9 +360,9 @@ directive_scopes = { # defaults to available everywhere
# globals() could conceivably be controlled at a finer granularity,
# but that would complicate the implementation
'old_style_globals': ('module',),
- 'np_pythran': ('module',),
- 'fast_gil': ('module',),
- 'iterable_coroutine': ('module', 'function'),
+ 'np_pythran': ('module',),
+ 'fast_gil': ('module',),
+ 'iterable_coroutine': ('module', 'function'),
}
diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd
index 2c17901fa4..0f40c75160 100644
--- a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd
+++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.pxd
@@ -43,33 +43,33 @@ cdef class ExpandInplaceOperators(EnvTransform):
cdef class AlignFunctionDefinitions(CythonTransform):
cdef dict directives
- cdef set imported_names
- cdef object scope
+ cdef set imported_names
+ cdef object scope
-@cython.final
+@cython.final
cdef class YieldNodeCollector(TreeVisitor):
cdef public list yields
cdef public list returns
- cdef public list finallys
- cdef public list excepts
+ cdef public list finallys
+ cdef public list excepts
cdef public bint has_return_value
- cdef public bint has_yield
- cdef public bint has_await
+ cdef public bint has_yield
+ cdef public bint has_await
-@cython.final
+@cython.final
cdef class MarkClosureVisitor(CythonTransform):
cdef bint needs_closure
-@cython.final
+@cython.final
cdef class CreateClosureClasses(CythonTransform):
cdef list path
cdef bint in_lambda
cdef module_scope
cdef generator_class
- cdef create_class_from_scope(self, node, target_module_scope, inner_node=*)
- cdef find_entries_used_in_closures(self, node)
-
+ cdef create_class_from_scope(self, node, target_module_scope, inner_node=*)
+ cdef find_entries_used_in_closures(self, node)
+
#cdef class InjectGilHandling(VisitorTransform, SkipDeclarations):
# cdef bint nogil
diff --git a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
index 0da3670cae..88f028aa0c 100644
--- a/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
+++ b/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
@@ -7,7 +7,7 @@ cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
error=object, warning=object, copy=object, _unicode=object)
import copy
-import hashlib
+import hashlib
from . import PyrexTypes
from . import Naming
@@ -15,7 +15,7 @@ from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
-from . import Errors
+from . import Errors
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
@@ -180,7 +180,7 @@ class PostParse(ScopeTrackingTransform):
# unpack a lambda expression into the corresponding DefNode
collector = YieldNodeCollector()
collector.visitchildren(node.result_expr)
- if collector.has_yield or collector.has_await or isinstance(node.result_expr, ExprNodes.YieldExprNode):
+ if collector.has_yield or collector.has_await or isinstance(node.result_expr, ExprNodes.YieldExprNode):
body = Nodes.ExprStatNode(
node.result_expr.pos, expr=node.result_expr)
else:
@@ -196,25 +196,25 @@ class PostParse(ScopeTrackingTransform):
def visit_GeneratorExpressionNode(self, node):
# unpack a generator expression into the corresponding DefNode
- collector = YieldNodeCollector()
- collector.visitchildren(node.loop)
- node.def_node = Nodes.DefNode(
- node.pos, name=node.name, doc=None,
- args=[], star_arg=None, starstar_arg=None,
- body=node.loop, is_async_def=collector.has_await)
- self.visitchildren(node)
- return node
-
- def visit_ComprehensionNode(self, node):
- # enforce local scope also in Py2 for async generators (seriously, that's a Py3.6 feature...)
- if not node.has_local_scope:
- collector = YieldNodeCollector()
- collector.visitchildren(node.loop)
- if collector.has_await:
- node.has_local_scope = True
- self.visitchildren(node)
- return node
-
+ collector = YieldNodeCollector()
+ collector.visitchildren(node.loop)
+ node.def_node = Nodes.DefNode(
+ node.pos, name=node.name, doc=None,
+ args=[], star_arg=None, starstar_arg=None,
+ body=node.loop, is_async_def=collector.has_await)
+ self.visitchildren(node)
+ return node
+
+ def visit_ComprehensionNode(self, node):
+ # enforce local scope also in Py2 for async generators (seriously, that's a Py3.6 feature...)
+ if not node.has_local_scope:
+ collector = YieldNodeCollector()
+ collector.visitchildren(node.loop)
+ if collector.has_await:
+ node.has_local_scope = True
+ self.visitchildren(node)
+ return node
+
# cdef variables
def handle_bufferdefaults(self, decl):
if not isinstance(decl.default, ExprNodes.DictNode):
@@ -599,29 +599,29 @@ class PxdPostParse(CythonTransform, SkipDeclarations):
else:
return node
-
-class TrackNumpyAttributes(VisitorTransform, SkipDeclarations):
- # TODO: Make name handling as good as in InterpretCompilerDirectives() below - probably best to merge the two.
- def __init__(self):
- super(TrackNumpyAttributes, self).__init__()
- self.numpy_module_names = set()
-
- def visit_CImportStatNode(self, node):
- if node.module_name == u"numpy":
- self.numpy_module_names.add(node.as_name or u"numpy")
- return node
-
- def visit_AttributeNode(self, node):
- self.visitchildren(node)
+
+class TrackNumpyAttributes(VisitorTransform, SkipDeclarations):
+ # TODO: Make name handling as good as in InterpretCompilerDirectives() below - probably best to merge the two.
+ def __init__(self):
+ super(TrackNumpyAttributes, self).__init__()
+ self.numpy_module_names = set()
+
+ def visit_CImportStatNode(self, node):
+ if node.module_name == u"numpy":
+ self.numpy_module_names.add(node.as_name or u"numpy")
+ return node
+
+ def visit_AttributeNode(self, node):
+ self.visitchildren(node)
obj = node.obj
if (obj.is_name and obj.name in self.numpy_module_names) or obj.is_numpy_attribute:
- node.is_numpy_attribute = True
- return node
-
- visit_Node = VisitorTransform.recurse_to_children
-
-
-class InterpretCompilerDirectives(CythonTransform):
+ node.is_numpy_attribute = True
+ return node
+
+ visit_Node = VisitorTransform.recurse_to_children
+
+
+class InterpretCompilerDirectives(CythonTransform):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
@@ -841,16 +841,16 @@ class InterpretCompilerDirectives(CythonTransform):
if node.name in self.cython_module_names:
node.is_cython_module = True
else:
- directive = self.directive_names.get(node.name)
- if directive is not None:
- node.cython_attribute = directive
- return node
-
- def visit_NewExprNode(self, node):
- self.visit(node.cppclass)
- self.visitchildren(node)
+ directive = self.directive_names.get(node.name)
+ if directive is not None:
+ node.cython_attribute = directive
return node
+ def visit_NewExprNode(self, node):
+ self.visit(node.cppclass)
+ self.visitchildren(node)
+ return node
+
def try_to_parse_directives(self, node):
# If node is the contents of an directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
@@ -886,8 +886,8 @@ class InterpretCompilerDirectives(CythonTransform):
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype is bool:
- arg = ExprNodes.BoolNode(node.pos, value=True)
- return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
+ arg = ExprNodes.BoolNode(node.pos, value=True)
+ return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
elif directivetype is None:
return [(optname, None)]
else:
@@ -896,25 +896,25 @@ class InterpretCompilerDirectives(CythonTransform):
return None
def try_to_parse_directive(self, optname, args, kwds, pos):
- if optname == 'np_pythran' and not self.context.cpp:
- raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
- elif optname == 'exceptval':
- # default: exceptval(None, check=True)
- arg_error = len(args) > 1
- check = True
- if kwds and kwds.key_value_pairs:
- kw = kwds.key_value_pairs[0]
- if (len(kwds.key_value_pairs) == 1 and
- kw.key.is_string_literal and kw.key.value == 'check' and
- isinstance(kw.value, ExprNodes.BoolNode)):
- check = kw.value.value
- else:
- arg_error = True
- if arg_error:
- raise PostParseError(
- pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
- return ('exceptval', (args[0] if args else None, check))
-
+ if optname == 'np_pythran' and not self.context.cpp:
+ raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
+ elif optname == 'exceptval':
+ # default: exceptval(None, check=True)
+ arg_error = len(args) > 1
+ check = True
+ if kwds and kwds.key_value_pairs:
+ kw = kwds.key_value_pairs[0]
+ if (len(kwds.key_value_pairs) == 1 and
+ kw.key.is_string_literal and kw.key.value == 'check' and
+ isinstance(kw.value, ExprNodes.BoolNode)):
+ check = kw.value.value
+ else:
+ arg_error = True
+ if arg_error:
+ raise PostParseError(
+ pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
+ return ('exceptval', (args[0] if args else None, check))
+
directivetype = Options.directive_types.get(optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.get_directive_defaults()[optname]
@@ -945,7 +945,7 @@ class InterpretCompilerDirectives(CythonTransform):
'The %s directive takes no prepositional arguments' % optname)
return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
elif directivetype is list:
- if kwds and len(kwds.key_value_pairs) != 0:
+ if kwds and len(kwds.key_value_pairs) != 0:
raise PostParseError(pos,
'The %s directive takes no keyword arguments' % optname)
return optname, [ str(arg.value) for arg in args ]
@@ -1014,8 +1014,8 @@ class InterpretCompilerDirectives(CythonTransform):
directives = []
realdecs = []
both = []
- # Decorators coming first take precedence.
- for dec in node.decorators[::-1]:
+ # Decorators coming first take precedence.
+ for dec in node.decorators[::-1]:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
@@ -1025,15 +1025,15 @@ class InterpretCompilerDirectives(CythonTransform):
directives.append(directive)
if directive[0] == 'staticmethod':
both.append(dec)
- # Adapt scope type based on decorators that change it.
- if directive[0] == 'cclass' and scope_name == 'class':
- scope_name = 'cclass'
+ # Adapt scope type based on decorators that change it.
+ if directive[0] == 'cclass' and scope_name == 'class':
+ scope_name = 'cclass'
else:
realdecs.append(dec)
- if realdecs and (scope_name == 'cclass' or
- isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
+ if realdecs and (scope_name == 'cclass' or
+ isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
- node.decorators = realdecs[::-1] + both[::-1]
+ node.decorators = realdecs[::-1] + both[::-1]
# merge or override repeated directives
optdict = {}
for directive in directives:
@@ -1283,7 +1283,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
pos, with_stat=node,
test_if_run=False,
args=excinfo_target,
- await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
body=Nodes.ReraiseStatNode(pos),
),
],
@@ -1305,7 +1305,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
test_if_run=True,
args=ExprNodes.TupleNode(
pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
- await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
handle_error_case=False,
)
return node
@@ -1376,28 +1376,28 @@ class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
elif decorator.is_attribute and decorator.obj.name in properties:
handler_name = self._map_property_attribute(decorator.attribute)
if handler_name:
- if decorator.obj.name != node.name:
- # CPython does not generate an error or warning, but not something useful either.
- error(decorator_node.pos,
- "Mismatching property names, expected '%s', got '%s'" % (
- decorator.obj.name, node.name))
- elif len(node.decorators) > 1:
+ if decorator.obj.name != node.name:
+ # CPython does not generate an error or warning, but not something useful either.
+ error(decorator_node.pos,
+ "Mismatching property names, expected '%s', got '%s'" % (
+ decorator.obj.name, node.name))
+ elif len(node.decorators) > 1:
return self._reject_decorated_property(node, decorator_node)
- else:
- return self._add_to_property(properties, node, handler_name, decorator_node)
-
- # we clear node.decorators, so we need to set the
- # is_staticmethod/is_classmethod attributes now
- for decorator in node.decorators:
- func = decorator.decorator
- if func.is_name:
- node.is_classmethod |= func.name == 'classmethod'
- node.is_staticmethod |= func.name == 'staticmethod'
-
+ else:
+ return self._add_to_property(properties, node, handler_name, decorator_node)
+
+ # we clear node.decorators, so we need to set the
+ # is_staticmethod/is_classmethod attributes now
+ for decorator in node.decorators:
+ func = decorator.decorator
+ if func.is_name:
+ node.is_classmethod |= func.name == 'classmethod'
+ node.is_staticmethod |= func.name == 'staticmethod'
+
# transform normal decorators
- decs = node.decorators
- node.decorators = None
- return self.chain_decorators(node, decs, node.name)
+ decs = node.decorators
+ node.decorators = None
+ return self.chain_decorators(node, decs, node.name)
@staticmethod
def _reject_decorated_property(node, decorator_node):
@@ -1531,13 +1531,13 @@ class ForwardDeclareTypes(CythonTransform):
def visit_CClassDefNode(self, node):
if node.class_name not in self.module_scope.entries:
node.declare(self.module_scope)
- # Expand fused methods of .pxd declared types to construct the final vtable order.
- type = self.module_scope.entries[node.class_name].type
- if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope:
- scope = type.scope
- for entry in scope.cfunc_entries:
- if entry.type and entry.type.is_fused:
- entry.type.get_all_specialized_function_types()
+ # Expand fused methods of .pxd declared types to construct the final vtable order.
+ type = self.module_scope.entries[node.class_name].type
+ if type is not None and type.is_extension_type and not type.is_builtin_type and type.scope:
+ scope = type.scope
+ for entry in scope.cfunc_entries:
+ if entry.type and entry.type.is_fused:
+ entry.type.get_all_specialized_function_types()
return node
@@ -1602,13 +1602,13 @@ if VALUE is not None:
return node
def visit_ModuleNode(self, node):
- # Pickling support requires injecting module-level nodes.
- self.extra_module_declarations = []
+ # Pickling support requires injecting module-level nodes.
+ self.extra_module_declarations = []
self.seen_vars_stack.append(set())
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.seen_vars_stack.pop()
- node.body.stats.extend(self.extra_module_declarations)
+ node.body.stats.extend(self.extra_module_declarations)
return node
def visit_LambdaNode(self, node):
@@ -1630,145 +1630,145 @@ if VALUE is not None:
stats.append(property)
if stats:
node.body.stats += stats
- if (node.visibility != 'extern'
- and not node.scope.lookup('__reduce__')
- and not node.scope.lookup('__reduce_ex__')):
- self._inject_pickle_methods(node)
- return node
-
- def _inject_pickle_methods(self, node):
- env = self.current_env()
- if node.scope.directives['auto_pickle'] is False: # None means attempt it.
- # Old behavior of not doing anything.
- return
- auto_pickle_forced = node.scope.directives['auto_pickle'] is True
-
- all_members = []
- cls = node.entry.type
- cinit = None
- inherited_reduce = None
- while cls is not None:
- all_members.extend(e for e in cls.scope.var_entries if e.name not in ('__weakref__', '__dict__'))
- cinit = cinit or cls.scope.lookup('__cinit__')
- inherited_reduce = inherited_reduce or cls.scope.lookup('__reduce__') or cls.scope.lookup('__reduce_ex__')
- cls = cls.base_type
- all_members.sort(key=lambda e: e.name)
-
- if inherited_reduce:
- # This is not failsafe, as we may not know whether a cimported class defines a __reduce__.
- # This is why we define __reduce_cython__ and only replace __reduce__
- # (via ExtensionTypes.SetupReduce utility code) at runtime on class creation.
- return
-
- non_py = [
- e for e in all_members
- if not e.type.is_pyobject and (not e.type.can_coerce_to_pyobject(env)
- or not e.type.can_coerce_from_pyobject(env))
- ]
-
- structs = [e for e in all_members if e.type.is_struct_or_union]
-
- if cinit or non_py or (structs and not auto_pickle_forced):
- if cinit:
- # TODO(robertwb): We could allow this if __cinit__ has no require arguments.
- msg = 'no default __reduce__ due to non-trivial __cinit__'
- elif non_py:
- msg = "%s cannot be converted to a Python object for pickling" % ','.join("self.%s" % e.name for e in non_py)
- else:
- # Extern structs may be only partially defined.
- # TODO(robertwb): Limit the restriction to extern
- # (and recursively extern-containing) structs.
- msg = ("Pickling of struct members such as %s must be explicitly requested "
- "with @auto_pickle(True)" % ','.join("self.%s" % e.name for e in structs))
-
- if auto_pickle_forced:
- error(node.pos, msg)
-
- pickle_func = TreeFragment(u"""
- def __reduce_cython__(self):
- raise TypeError("%(msg)s")
- def __setstate_cython__(self, __pyx_state):
- raise TypeError("%(msg)s")
- """ % {'msg': msg},
- level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
- pickle_func.analyse_declarations(node.scope)
- self.visit(pickle_func)
- node.body.stats.append(pickle_func)
-
- else:
- for e in all_members:
- if not e.type.is_pyobject:
- e.type.create_to_py_utility_code(env)
- e.type.create_from_py_utility_code(env)
- all_members_names = sorted([e.name for e in all_members])
- checksum = '0x%s' % hashlib.md5(' '.join(all_members_names).encode('utf-8')).hexdigest()[:7]
- unpickle_func_name = '__pyx_unpickle_%s' % node.class_name
-
- # TODO(robertwb): Move the state into the third argument
- # so it can be pickled *after* self is memoized.
- unpickle_func = TreeFragment(u"""
- def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state):
+ if (node.visibility != 'extern'
+ and not node.scope.lookup('__reduce__')
+ and not node.scope.lookup('__reduce_ex__')):
+ self._inject_pickle_methods(node)
+ return node
+
+ def _inject_pickle_methods(self, node):
+ env = self.current_env()
+ if node.scope.directives['auto_pickle'] is False: # None means attempt it.
+ # Old behavior of not doing anything.
+ return
+ auto_pickle_forced = node.scope.directives['auto_pickle'] is True
+
+ all_members = []
+ cls = node.entry.type
+ cinit = None
+ inherited_reduce = None
+ while cls is not None:
+ all_members.extend(e for e in cls.scope.var_entries if e.name not in ('__weakref__', '__dict__'))
+ cinit = cinit or cls.scope.lookup('__cinit__')
+ inherited_reduce = inherited_reduce or cls.scope.lookup('__reduce__') or cls.scope.lookup('__reduce_ex__')
+ cls = cls.base_type
+ all_members.sort(key=lambda e: e.name)
+
+ if inherited_reduce:
+ # This is not failsafe, as we may not know whether a cimported class defines a __reduce__.
+ # This is why we define __reduce_cython__ and only replace __reduce__
+ # (via ExtensionTypes.SetupReduce utility code) at runtime on class creation.
+ return
+
+ non_py = [
+ e for e in all_members
+ if not e.type.is_pyobject and (not e.type.can_coerce_to_pyobject(env)
+ or not e.type.can_coerce_from_pyobject(env))
+ ]
+
+ structs = [e for e in all_members if e.type.is_struct_or_union]
+
+ if cinit or non_py or (structs and not auto_pickle_forced):
+ if cinit:
+ # TODO(robertwb): We could allow this if __cinit__ has no require arguments.
+ msg = 'no default __reduce__ due to non-trivial __cinit__'
+ elif non_py:
+ msg = "%s cannot be converted to a Python object for pickling" % ','.join("self.%s" % e.name for e in non_py)
+ else:
+ # Extern structs may be only partially defined.
+ # TODO(robertwb): Limit the restriction to extern
+ # (and recursively extern-containing) structs.
+ msg = ("Pickling of struct members such as %s must be explicitly requested "
+ "with @auto_pickle(True)" % ','.join("self.%s" % e.name for e in structs))
+
+ if auto_pickle_forced:
+ error(node.pos, msg)
+
+ pickle_func = TreeFragment(u"""
+ def __reduce_cython__(self):
+ raise TypeError("%(msg)s")
+ def __setstate_cython__(self, __pyx_state):
+ raise TypeError("%(msg)s")
+ """ % {'msg': msg},
+ level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
+ pickle_func.analyse_declarations(node.scope)
+ self.visit(pickle_func)
+ node.body.stats.append(pickle_func)
+
+ else:
+ for e in all_members:
+ if not e.type.is_pyobject:
+ e.type.create_to_py_utility_code(env)
+ e.type.create_from_py_utility_code(env)
+ all_members_names = sorted([e.name for e in all_members])
+ checksum = '0x%s' % hashlib.md5(' '.join(all_members_names).encode('utf-8')).hexdigest()[:7]
+ unpickle_func_name = '__pyx_unpickle_%s' % node.class_name
+
+ # TODO(robertwb): Move the state into the third argument
+ # so it can be pickled *after* self is memoized.
+ unpickle_func = TreeFragment(u"""
+ def %(unpickle_func_name)s(__pyx_type, long __pyx_checksum, __pyx_state):
cdef object __pyx_PickleError
cdef object __pyx_result
- if __pyx_checksum != %(checksum)s:
- from pickle import PickleError as __pyx_PickleError
- raise __pyx_PickleError("Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum)
- __pyx_result = %(class_name)s.__new__(__pyx_type)
- if __pyx_state is not None:
- %(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state)
- return __pyx_result
-
- cdef %(unpickle_func_name)s__set_state(%(class_name)s __pyx_result, tuple __pyx_state):
- %(assignments)s
- if len(__pyx_state) > %(num_members)d and hasattr(__pyx_result, '__dict__'):
- __pyx_result.__dict__.update(__pyx_state[%(num_members)d])
- """ % {
- 'unpickle_func_name': unpickle_func_name,
- 'checksum': checksum,
- 'members': ', '.join(all_members_names),
- 'class_name': node.class_name,
- 'assignments': '; '.join(
- '__pyx_result.%s = __pyx_state[%s]' % (v, ix)
- for ix, v in enumerate(all_members_names)),
- 'num_members': len(all_members_names),
- }, level='module', pipeline=[NormalizeTree(None)]).substitute({})
- unpickle_func.analyse_declarations(node.entry.scope)
- self.visit(unpickle_func)
- self.extra_module_declarations.append(unpickle_func)
-
- pickle_func = TreeFragment(u"""
- def __reduce_cython__(self):
+ if __pyx_checksum != %(checksum)s:
+ from pickle import PickleError as __pyx_PickleError
+ raise __pyx_PickleError("Incompatible checksums (%%s vs %(checksum)s = (%(members)s))" %% __pyx_checksum)
+ __pyx_result = %(class_name)s.__new__(__pyx_type)
+ if __pyx_state is not None:
+ %(unpickle_func_name)s__set_state(<%(class_name)s> __pyx_result, __pyx_state)
+ return __pyx_result
+
+ cdef %(unpickle_func_name)s__set_state(%(class_name)s __pyx_result, tuple __pyx_state):
+ %(assignments)s
+ if len(__pyx_state) > %(num_members)d and hasattr(__pyx_result, '__dict__'):
+ __pyx_result.__dict__.update(__pyx_state[%(num_members)d])
+ """ % {
+ 'unpickle_func_name': unpickle_func_name,
+ 'checksum': checksum,
+ 'members': ', '.join(all_members_names),
+ 'class_name': node.class_name,
+ 'assignments': '; '.join(
+ '__pyx_result.%s = __pyx_state[%s]' % (v, ix)
+ for ix, v in enumerate(all_members_names)),
+ 'num_members': len(all_members_names),
+ }, level='module', pipeline=[NormalizeTree(None)]).substitute({})
+ unpickle_func.analyse_declarations(node.entry.scope)
+ self.visit(unpickle_func)
+ self.extra_module_declarations.append(unpickle_func)
+
+ pickle_func = TreeFragment(u"""
+ def __reduce_cython__(self):
cdef tuple state
cdef object _dict
- cdef bint use_setstate
- state = (%(members)s)
- _dict = getattr(self, '__dict__', None)
- if _dict is not None:
- state += (_dict,)
- use_setstate = True
- else:
- use_setstate = %(any_notnone_members)s
- if use_setstate:
- return %(unpickle_func_name)s, (type(self), %(checksum)s, None), state
- else:
- return %(unpickle_func_name)s, (type(self), %(checksum)s, state)
-
- def __setstate_cython__(self, __pyx_state):
- %(unpickle_func_name)s__set_state(self, __pyx_state)
- """ % {
- 'unpickle_func_name': unpickle_func_name,
- 'checksum': checksum,
- 'members': ', '.join('self.%s' % v for v in all_members_names) + (',' if len(all_members_names) == 1 else ''),
- # Even better, we could check PyType_IS_GC.
- 'any_notnone_members' : ' or '.join(['self.%s is not None' % e.name for e in all_members if e.type.is_pyobject] or ['False']),
- },
- level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
- pickle_func.analyse_declarations(node.scope)
+ cdef bint use_setstate
+ state = (%(members)s)
+ _dict = getattr(self, '__dict__', None)
+ if _dict is not None:
+ state += (_dict,)
+ use_setstate = True
+ else:
+ use_setstate = %(any_notnone_members)s
+ if use_setstate:
+ return %(unpickle_func_name)s, (type(self), %(checksum)s, None), state
+ else:
+ return %(unpickle_func_name)s, (type(self), %(checksum)s, state)
+
+ def __setstate_cython__(self, __pyx_state):
+ %(unpickle_func_name)s__set_state(self, __pyx_state)
+ """ % {
+ 'unpickle_func_name': unpickle_func_name,
+ 'checksum': checksum,
+ 'members': ', '.join('self.%s' % v for v in all_members_names) + (',' if len(all_members_names) == 1 else ''),
+ # Even better, we could check PyType_IS_GC.
+ 'any_notnone_members' : ' or '.join(['self.%s is not None' % e.name for e in all_members if e.type.is_pyobject] or ['False']),
+ },
+ level='c_class', pipeline=[NormalizeTree(None)]).substitute({})
+ pickle_func.analyse_declarations(node.scope)
self.enter_scope(node, node.scope) # functions should be visited in the class scope
- self.visit(pickle_func)
+ self.visit(pickle_func)
self.exit_scope()
- node.body.stats.append(pickle_func)
-
+ node.body.stats.append(pickle_func)
+
def _handle_fused_def_decorators(self, old_decorators, env, node):
"""
Create function calls to the decorators and reassignments to
@@ -1868,7 +1868,7 @@ if VALUE is not None:
def visit_FuncDefNode(self, node):
"""
- Analyse a function and its body, as that hasn't happened yet. Also
+ Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals().
Then, if we are a function with fused arguments, replace the function
@@ -1931,8 +1931,8 @@ if VALUE is not None:
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
node.code_object = rhs.code_object
- if node.is_generator:
- node.gbody.code_object = node.code_object
+ if node.is_generator:
+ node.gbody.code_object = node.code_object
if env.is_py_class_scope:
rhs.binding = True
@@ -2059,7 +2059,7 @@ if VALUE is not None:
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
- # on these nodes in a separate recursive process from the
+ # on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
@@ -2354,20 +2354,20 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
if 'inline' in self.directives:
modifiers.append('inline')
nogil = self.directives.get('nogil')
- except_val = self.directives.get('exceptval')
- return_type_node = self.directives.get('returns')
- if return_type_node is None and self.directives['annotation_typing']:
- return_type_node = node.return_type_annotation
- # for Python anntations, prefer safe exception handling by default
- if return_type_node is not None and except_val is None:
- except_val = (None, True) # except *
- elif except_val is None:
- # backward compatible default: no exception check
- except_val = (None, False)
+ except_val = self.directives.get('exceptval')
+ return_type_node = self.directives.get('returns')
+ if return_type_node is None and self.directives['annotation_typing']:
+ return_type_node = node.return_type_annotation
+ # for Python anntations, prefer safe exception handling by default
+ if return_type_node is not None and except_val is None:
+ except_val = (None, True) # except *
+ elif except_val is None:
+ # backward compatible default: no exception check
+ except_val = (None, False)
if 'ccall' in self.directives:
node = node.as_cfunction(
overridable=True, modifiers=modifiers, nogil=nogil,
- returns=return_type_node, except_val=except_val)
+ returns=return_type_node, except_val=except_val)
return self.visit(node)
if 'cfunc' in self.directives:
if self.in_py_class:
@@ -2375,7 +2375,7 @@ class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
else:
node = node.as_cfunction(
overridable=False, modifiers=modifiers, nogil=nogil,
- returns=return_type_node, except_val=except_val)
+ returns=return_type_node, except_val=except_val)
return self.visit(node)
if 'inline' in modifiers:
error(node.pos, "Python functions cannot be declared 'inline'")
@@ -2531,23 +2531,23 @@ class YieldNodeCollector(TreeVisitor):
super(YieldNodeCollector, self).__init__()
self.yields = []
self.returns = []
- self.finallys = []
- self.excepts = []
+ self.finallys = []
+ self.excepts = []
self.has_return_value = False
- self.has_yield = False
- self.has_await = False
+ self.has_yield = False
+ self.has_await = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_YieldExprNode(self, node):
self.yields.append(node)
- self.has_yield = True
+ self.has_yield = True
self.visitchildren(node)
def visit_AwaitExprNode(self, node):
- self.yields.append(node)
- self.has_await = True
+ self.yields.append(node)
+ self.has_await = True
self.visitchildren(node)
def visit_ReturnStatNode(self, node):
@@ -2556,14 +2556,14 @@ class YieldNodeCollector(TreeVisitor):
self.has_return_value = True
self.returns.append(node)
- def visit_TryFinallyStatNode(self, node):
- self.visitchildren(node)
- self.finallys.append(node)
-
- def visit_TryExceptStatNode(self, node):
- self.visitchildren(node)
- self.excepts.append(node)
-
+ def visit_TryFinallyStatNode(self, node):
+ self.visitchildren(node)
+ self.finallys.append(node)
+
+ def visit_TryExceptStatNode(self, node):
+ self.visitchildren(node)
+ self.excepts.append(node)
+
def visit_ClassDefNode(self, node):
pass
@@ -2599,31 +2599,31 @@ class MarkClosureVisitor(CythonTransform):
collector.visitchildren(node)
if node.is_async_def:
- coroutine_type = Nodes.AsyncDefNode
- if collector.has_yield:
- coroutine_type = Nodes.AsyncGenNode
- for yield_expr in collector.yields + collector.returns:
- yield_expr.in_async_gen = True
- elif self.current_directives['iterable_coroutine']:
- coroutine_type = Nodes.IterableAsyncDefNode
- elif collector.has_await:
- found = next(y for y in collector.yields if y.is_await)
- error(found.pos, "'await' not allowed in generators (use 'yield')")
- return node
- elif collector.has_yield:
- coroutine_type = Nodes.GeneratorDefNode
+ coroutine_type = Nodes.AsyncDefNode
+ if collector.has_yield:
+ coroutine_type = Nodes.AsyncGenNode
+ for yield_expr in collector.yields + collector.returns:
+ yield_expr.in_async_gen = True
+ elif self.current_directives['iterable_coroutine']:
+ coroutine_type = Nodes.IterableAsyncDefNode
+ elif collector.has_await:
+ found = next(y for y in collector.yields if y.is_await)
+ error(found.pos, "'await' not allowed in generators (use 'yield')")
+ return node
+ elif collector.has_yield:
+ coroutine_type = Nodes.GeneratorDefNode
else:
return node
- for i, yield_expr in enumerate(collector.yields, 1):
+ for i, yield_expr in enumerate(collector.yields, 1):
yield_expr.label_num = i
- for retnode in collector.returns + collector.finallys + collector.excepts:
+ for retnode in collector.returns + collector.finallys + collector.excepts:
retnode.in_generator = True
gbody = Nodes.GeneratorBodyDefNode(
- pos=node.pos, name=node.name, body=node.body,
- is_async_gen_body=node.is_async_def and collector.has_yield)
- coroutine = coroutine_type(
+ pos=node.pos, name=node.name, body=node.body,
+ is_async_gen_body=node.is_async_def and collector.has_yield)
+ coroutine = coroutine_type(
pos=node.pos, name=node.name, args=node.args,
star_arg=node.star_arg, starstar_arg=node.starstar_arg,
doc=node.doc, decorators=node.decorators,
@@ -2670,28 +2670,28 @@ class CreateClosureClasses(CythonTransform):
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
- for scope in node.local_scope.iter_local_scopes():
- for name, entry in scope.entries.items():
- if not name:
- continue
- if entry.from_closure:
- from_closure.append((name, entry))
- elif entry.in_closure:
- in_closure.append((name, entry))
+ for scope in node.local_scope.iter_local_scopes():
+ for name, entry in scope.entries.items():
+ if not name:
+ continue
+ if entry.from_closure:
+ from_closure.append((name, entry))
+ elif entry.in_closure:
+ in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
- for scope in node.local_scope.iter_local_scopes():
- for entry in scope.entries.values():
+ for scope in node.local_scope.iter_local_scopes():
+ for entry in scope.entries.values():
if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal):
- entry.in_closure = True
+ entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
- # Now from the beginning
+ # Now from the beginning
node.needs_closure = False
node.needs_outer_scope = False
@@ -2733,10 +2733,10 @@ class CreateClosureClasses(CythonTransform):
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
- class_scope.is_closure_class_scope = True
- if node.is_async_def or node.is_generator:
- # Generators need their closure intact during cleanup as they resume to handle GeneratorExit
- class_scope.directives['no_gc_clear'] = True
+ class_scope.is_closure_class_scope = True
+ if node.is_async_def or node.is_generator:
+ # Generators need their closure intact during cleanup as they resume to handle GeneratorExit
+ class_scope.directives['no_gc_clear'] = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
@@ -2749,12 +2749,12 @@ class CreateClosureClasses(CythonTransform):
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
- closure_entry = class_scope.declare_var(
- pos=entry.pos,
- name=entry.name if not entry.in_subscope else None,
- cname=entry.cname,
- type=entry.type,
- is_cdef=True)
+ closure_entry = class_scope.declare_var(
+ pos=entry.pos,
+ name=entry.name if not entry.in_subscope else None,
+ cname=entry.cname,
+ type=entry.type,
+ is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
@@ -3191,22 +3191,22 @@ class TransformBuiltinMethods(EnvTransform):
def visit_GeneralCallNode(self, node):
function = node.function.as_cython_attribute()
- if function == u'cast':
- # NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args
+ if function == u'cast':
+ # NOTE: assuming simple tuple/dict nodes for positional_args and keyword_args
args = node.positional_args.args
kwargs = node.keyword_args.compile_time_value(None)
- if (len(args) != 2 or len(kwargs) > 1 or
- (len(kwargs) == 1 and 'typecheck' not in kwargs)):
- error(node.function.pos,
- u"cast() takes exactly two arguments and an optional typecheck keyword")
- else:
- type = args[0].analyse_as_type(self.current_env())
- if type:
- typecheck = kwargs.get('typecheck', False)
- node = ExprNodes.TypecastNode(
- node.function.pos, type=type, operand=args[1], typecheck=typecheck)
+ if (len(args) != 2 or len(kwargs) > 1 or
+ (len(kwargs) == 1 and 'typecheck' not in kwargs)):
+ error(node.function.pos,
+ u"cast() takes exactly two arguments and an optional typecheck keyword")
+ else:
+ type = args[0].analyse_as_type(self.current_env())
+ if type:
+ typecheck = kwargs.get('typecheck', False)
+ node = ExprNodes.TypecastNode(
+ node.function.pos, type=type, operand=args[1], typecheck=typecheck)
else:
- error(args[0].pos, "Not a type")
+ error(args[0].pos, "Not a type")
self.visitchildren(node)
return node
@@ -3239,9 +3239,9 @@ class ReplaceFusedTypeChecks(VisitorTransform):
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
- with Errors.local_errors(ignore=True):
- type1 = node.operand1.analyse_as_type(self.local_scope)
- type2 = node.operand2.analyse_as_type(self.local_scope)
+ with Errors.local_errors(ignore=True):
+ type1 = node.operand1.analyse_as_type(self.local_scope)
+ type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.pxd b/contrib/tools/cython/Cython/Compiler/Parsing.pxd
index 25453b39ab..e80ffc2dd6 100644
--- a/contrib/tools/cython/Cython/Compiler/Parsing.pxd
+++ b/contrib/tools/cython/Cython/Compiler/Parsing.pxd
@@ -68,12 +68,12 @@ cdef p_opt_string_literal(PyrexScanner s, required_type=*)
cdef bint check_for_non_ascii_characters(unicode string)
@cython.locals(systr=unicode, is_python3_source=bint, is_raw=bint)
cdef p_string_literal(PyrexScanner s, kind_override=*)
-cdef _append_escape_sequence(kind, builder, unicode escape_sequence, PyrexScanner s)
+cdef _append_escape_sequence(kind, builder, unicode escape_sequence, PyrexScanner s)
cdef tuple _f_string_error_pos(pos, string, Py_ssize_t i)
@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, next_start=Py_ssize_t)
-cdef list p_f_string(PyrexScanner s, unicode_value, pos, bint is_raw)
+cdef list p_f_string(PyrexScanner s, unicode_value, pos, bint is_raw)
@cython.locals(i=Py_ssize_t, size=Py_ssize_t, c=Py_UCS4, quote_char=Py_UCS4, NO_CHAR=Py_UCS4)
-cdef tuple p_f_string_expr(PyrexScanner s, unicode_value, pos, Py_ssize_t starting_index, bint is_raw)
+cdef tuple p_f_string_expr(PyrexScanner s, unicode_value, pos, Py_ssize_t starting_index, bint is_raw)
cdef p_list_maker(PyrexScanner s)
cdef p_comp_iter(PyrexScanner s, body)
cdef p_comp_for(PyrexScanner s, body)
diff --git a/contrib/tools/cython/Cython/Compiler/Parsing.py b/contrib/tools/cython/Cython/Compiler/Parsing.py
index 4d2f12a24a..5bf2fad6fd 100644
--- a/contrib/tools/cython/Cython/Compiler/Parsing.py
+++ b/contrib/tools/cython/Cython/Compiler/Parsing.py
@@ -9,17 +9,17 @@ from __future__ import absolute_import
import cython
cython.declare(Nodes=object, ExprNodes=object, EncodedString=object,
bytes_literal=object, StringEncoding=object,
- FileSourceDescriptor=object, lookup_unicodechar=object, unicode_category=object,
+ FileSourceDescriptor=object, lookup_unicodechar=object, unicode_category=object,
Future=object, Options=object, error=object, warning=object,
- Builtin=object, ModuleNode=object, Utils=object, _unicode=object, _bytes=object,
- re=object, sys=object, _parse_escape_sequences=object, _parse_escape_sequences_raw=object,
+ Builtin=object, ModuleNode=object, Utils=object, _unicode=object, _bytes=object,
+ re=object, sys=object, _parse_escape_sequences=object, _parse_escape_sequences_raw=object,
partial=object, reduce=object, _IS_PY3=cython.bint, _IS_2BYTE_UNICODE=cython.bint,
_CDEF_MODIFIERS=tuple)
from io import StringIO
import re
import sys
-from unicodedata import lookup as lookup_unicodechar, category as unicode_category
+from unicodedata import lookup as lookup_unicodechar, category as unicode_category
from functools import partial, reduce
from .Scanning import PyrexScanner, FileSourceDescriptor, StringSourceDescriptor
@@ -35,7 +35,7 @@ from . import Future
from . import Options
_IS_PY3 = sys.version_info[0] >= 3
-_IS_2BYTE_UNICODE = sys.maxunicode == 0xffff
+_IS_2BYTE_UNICODE = sys.maxunicode == 0xffff
_CDEF_MODIFIERS = ('inline', 'nogil', 'api')
@@ -503,7 +503,7 @@ def p_call_parse_args(s, allow_genexp=True):
break
s.next()
- if s.sy in ('for', 'async'):
+ if s.sy in ('for', 'async'):
if not keyword_args and not last_was_tuple_unpack:
if len(positional_args) == 1 and len(positional_args[0]) == 1:
positional_args = [[p_genexp(s, positional_args[0][0])]]
@@ -706,17 +706,17 @@ def p_atom(s):
elif sy == 'IDENT':
name = s.systring
if name == "None":
- result = ExprNodes.NoneNode(pos)
+ result = ExprNodes.NoneNode(pos)
elif name == "True":
- result = ExprNodes.BoolNode(pos, value=True)
+ result = ExprNodes.BoolNode(pos, value=True)
elif name == "False":
- result = ExprNodes.BoolNode(pos, value=False)
+ result = ExprNodes.BoolNode(pos, value=False)
elif name == "NULL" and not s.in_python_file:
- result = ExprNodes.NullNode(pos)
+ result = ExprNodes.NullNode(pos)
else:
- result = p_name(s, name)
- s.next()
- return result
+ result = p_name(s, name)
+ s.next()
+ return result
else:
s.error("Expected an identifier or literal")
@@ -774,15 +774,15 @@ def wrap_compile_time_constant(pos, value):
return ExprNodes.IntNode(pos, value=rep, constant_result=value)
elif isinstance(value, float):
return ExprNodes.FloatNode(pos, value=rep, constant_result=value)
- elif isinstance(value, complex):
- node = ExprNodes.ImagNode(pos, value=repr(value.imag), constant_result=complex(0.0, value.imag))
- if value.real:
- # FIXME: should we care about -0.0 ?
- # probably not worth using the '-' operator for negative imag values
- node = ExprNodes.binop_node(
- pos, '+', ExprNodes.FloatNode(pos, value=repr(value.real), constant_result=value.real), node,
- constant_result=value)
- return node
+ elif isinstance(value, complex):
+ node = ExprNodes.ImagNode(pos, value=repr(value.imag), constant_result=complex(0.0, value.imag))
+ if value.real:
+ # FIXME: should we care about -0.0 ?
+ # probably not worth using the '-' operator for negative imag values
+ node = ExprNodes.binop_node(
+ pos, '+', ExprNodes.FloatNode(pos, value=repr(value.real), constant_result=value.real), node,
+ constant_result=value)
+ return node
elif isinstance(value, _unicode):
return ExprNodes.UnicodeNode(pos, value=EncodedString(value))
elif isinstance(value, _bytes):
@@ -824,8 +824,8 @@ def p_cat_string_literal(s):
if set([kind, next_kind]) in (set(['f', 'u']), set(['f', ''])):
kind = 'f'
else:
- error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" % (
- kind, next_kind))
+ error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" % (
+ kind, next_kind))
continue
bstrings.append(next_bytes_value)
ustrings.append(next_unicode_value)
@@ -884,15 +884,15 @@ def p_string_literal(s, kind_override=None):
has_non_ascii_literal_characters = False
string_start_pos = (pos[0], pos[1], pos[2] + len(s.systring))
kind_string = s.systring.rstrip('"\'').lower()
- if len(kind_string) > 1:
- if len(set(kind_string)) != len(kind_string):
- error(pos, 'Duplicate string prefix character')
- if 'b' in kind_string and 'u' in kind_string:
- error(pos, 'String prefixes b and u cannot be combined')
- if 'b' in kind_string and 'f' in kind_string:
- error(pos, 'String prefixes b and f cannot be combined')
- if 'u' in kind_string and 'f' in kind_string:
- error(pos, 'String prefixes u and f cannot be combined')
+ if len(kind_string) > 1:
+ if len(set(kind_string)) != len(kind_string):
+ error(pos, 'Duplicate string prefix character')
+ if 'b' in kind_string and 'u' in kind_string:
+ error(pos, 'String prefixes b and u cannot be combined')
+ if 'b' in kind_string and 'f' in kind_string:
+ error(pos, 'String prefixes b and f cannot be combined')
+ if 'u' in kind_string and 'f' in kind_string:
+ error(pos, 'String prefixes u and f cannot be combined')
is_raw = 'r' in kind_string
@@ -900,11 +900,11 @@ def p_string_literal(s, kind_override=None):
# this should never happen, since the lexer does not allow combining c
# with other prefix characters
if len(kind_string) != 1:
- error(pos, 'Invalid string prefix for character literal')
+ error(pos, 'Invalid string prefix for character literal')
kind = 'c'
elif 'f' in kind_string:
- kind = 'f' # u is ignored
- is_raw = True # postpone the escape resolution
+ kind = 'f' # u is ignored
+ is_raw = True # postpone the escape resolution
elif 'b' in kind_string:
kind = 'b'
elif 'u' in kind_string:
@@ -935,13 +935,13 @@ def p_string_literal(s, kind_override=None):
if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr):
has_non_ascii_literal_characters = True
elif sy == 'ESCAPE':
- # in Py2, 'ur' raw unicode strings resolve unicode escapes but nothing else
- if is_raw and (is_python3_source or kind != 'u' or systr[1] not in u'Uu'):
+ # in Py2, 'ur' raw unicode strings resolve unicode escapes but nothing else
+ if is_raw and (is_python3_source or kind != 'u' or systr[1] not in u'Uu'):
chars.append(systr)
- if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr):
+ if is_python3_source and not has_non_ascii_literal_characters and check_for_non_ascii_characters(systr):
has_non_ascii_literal_characters = True
else:
- _append_escape_sequence(kind, chars, systr, s)
+ _append_escape_sequence(kind, chars, systr, s)
elif sy == 'NEWLINE':
chars.append(u'\n')
elif sy == 'END_STRING':
@@ -949,8 +949,8 @@ def p_string_literal(s, kind_override=None):
elif sy == 'EOF':
s.error("Unclosed string literal", pos=pos)
else:
- s.error("Unexpected token %r:%r in string literal" % (
- sy, s.systring))
+ s.error("Unexpected token %r:%r in string literal" % (
+ sy, s.systring))
if kind == 'c':
unicode_value = None
@@ -963,7 +963,7 @@ def p_string_literal(s, kind_override=None):
and is_python3_source and Future.unicode_literals in s.context.future_directives):
# Python 3 forbids literal non-ASCII characters in byte strings
if kind == 'b':
- s.error("bytes can only contain ASCII literal characters.", pos=pos)
+ s.error("bytes can only contain ASCII literal characters.", pos=pos)
bytes_value = None
if kind == 'f':
unicode_value = p_f_string(s, unicode_value, string_start_pos, is_raw='r' in kind_string)
@@ -971,125 +971,125 @@ def p_string_literal(s, kind_override=None):
return (kind, bytes_value, unicode_value)
-def _append_escape_sequence(kind, builder, escape_sequence, s):
- c = escape_sequence[1]
- if c in u"01234567":
- builder.append_charval(int(escape_sequence[1:], 8))
- elif c in u"'\"\\":
- builder.append(c)
- elif c in u"abfnrtv":
- builder.append(StringEncoding.char_from_escape_sequence(escape_sequence))
- elif c == u'\n':
- pass # line continuation
- elif c == u'x': # \xXX
- if len(escape_sequence) == 4:
- builder.append_charval(int(escape_sequence[2:], 16))
- else:
- s.error("Invalid hex escape '%s'" % escape_sequence, fatal=False)
- elif c in u'NUu' and kind in ('u', 'f', ''): # \uxxxx, \Uxxxxxxxx, \N{...}
- chrval = -1
- if c == u'N':
- uchar = None
- try:
- uchar = lookup_unicodechar(escape_sequence[3:-1])
- chrval = ord(uchar)
- except KeyError:
- s.error("Unknown Unicode character name %s" %
- repr(escape_sequence[3:-1]).lstrip('u'), fatal=False)
- except TypeError:
- # 2-byte unicode build of CPython?
- if (uchar is not None and _IS_2BYTE_UNICODE and len(uchar) == 2 and
- unicode_category(uchar[0]) == 'Cs' and unicode_category(uchar[1]) == 'Cs'):
- # surrogate pair instead of single character
- chrval = 0x10000 + (ord(uchar[0]) - 0xd800) >> 10 + (ord(uchar[1]) - 0xdc00)
- else:
- raise
- elif len(escape_sequence) in (6, 10):
- chrval = int(escape_sequence[2:], 16)
- if chrval > 1114111: # sys.maxunicode:
- s.error("Invalid unicode escape '%s'" % escape_sequence)
- chrval = -1
- else:
- s.error("Invalid unicode escape '%s'" % escape_sequence, fatal=False)
- if chrval >= 0:
- builder.append_uescape(chrval, escape_sequence)
- else:
- builder.append(escape_sequence)
-
-
-_parse_escape_sequences_raw, _parse_escape_sequences = [re.compile((
- # escape sequences:
- br'(\\(?:' +
- (br'\\?' if is_raw else (
- br'[\\abfnrtv"\'{]|'
- br'[0-7]{2,3}|'
- br'N\{[^}]*\}|'
- br'x[0-9a-fA-F]{2}|'
- br'u[0-9a-fA-F]{4}|'
- br'U[0-9a-fA-F]{8}|'
- br'[NxuU]|' # detect invalid escape sequences that do not match above
- )) +
- br')?|'
- # non-escape sequences:
- br'\{\{?|'
- br'\}\}?|'
- br'[^\\{}]+)'
- ).decode('us-ascii')).match
- for is_raw in (True, False)]
-
-
+def _append_escape_sequence(kind, builder, escape_sequence, s):
+ c = escape_sequence[1]
+ if c in u"01234567":
+ builder.append_charval(int(escape_sequence[1:], 8))
+ elif c in u"'\"\\":
+ builder.append(c)
+ elif c in u"abfnrtv":
+ builder.append(StringEncoding.char_from_escape_sequence(escape_sequence))
+ elif c == u'\n':
+ pass # line continuation
+ elif c == u'x': # \xXX
+ if len(escape_sequence) == 4:
+ builder.append_charval(int(escape_sequence[2:], 16))
+ else:
+ s.error("Invalid hex escape '%s'" % escape_sequence, fatal=False)
+ elif c in u'NUu' and kind in ('u', 'f', ''): # \uxxxx, \Uxxxxxxxx, \N{...}
+ chrval = -1
+ if c == u'N':
+ uchar = None
+ try:
+ uchar = lookup_unicodechar(escape_sequence[3:-1])
+ chrval = ord(uchar)
+ except KeyError:
+ s.error("Unknown Unicode character name %s" %
+ repr(escape_sequence[3:-1]).lstrip('u'), fatal=False)
+ except TypeError:
+ # 2-byte unicode build of CPython?
+ if (uchar is not None and _IS_2BYTE_UNICODE and len(uchar) == 2 and
+ unicode_category(uchar[0]) == 'Cs' and unicode_category(uchar[1]) == 'Cs'):
+ # surrogate pair instead of single character
+ chrval = 0x10000 + (ord(uchar[0]) - 0xd800) >> 10 + (ord(uchar[1]) - 0xdc00)
+ else:
+ raise
+ elif len(escape_sequence) in (6, 10):
+ chrval = int(escape_sequence[2:], 16)
+ if chrval > 1114111: # sys.maxunicode:
+ s.error("Invalid unicode escape '%s'" % escape_sequence)
+ chrval = -1
+ else:
+ s.error("Invalid unicode escape '%s'" % escape_sequence, fatal=False)
+ if chrval >= 0:
+ builder.append_uescape(chrval, escape_sequence)
+ else:
+ builder.append(escape_sequence)
+
+
+_parse_escape_sequences_raw, _parse_escape_sequences = [re.compile((
+ # escape sequences:
+ br'(\\(?:' +
+ (br'\\?' if is_raw else (
+ br'[\\abfnrtv"\'{]|'
+ br'[0-7]{2,3}|'
+ br'N\{[^}]*\}|'
+ br'x[0-9a-fA-F]{2}|'
+ br'u[0-9a-fA-F]{4}|'
+ br'U[0-9a-fA-F]{8}|'
+ br'[NxuU]|' # detect invalid escape sequences that do not match above
+ )) +
+ br')?|'
+ # non-escape sequences:
+ br'\{\{?|'
+ br'\}\}?|'
+ br'[^\\{}]+)'
+ ).decode('us-ascii')).match
+ for is_raw in (True, False)]
+
+
def _f_string_error_pos(pos, string, i):
return (pos[0], pos[1], pos[2] + i + 1) # FIXME: handle newlines in string
-def p_f_string(s, unicode_value, pos, is_raw):
+def p_f_string(s, unicode_value, pos, is_raw):
# Parses a PEP 498 f-string literal into a list of nodes. Nodes are either UnicodeNodes
# or FormattedValueNodes.
values = []
- next_start = 0
+ next_start = 0
size = len(unicode_value)
- builder = StringEncoding.UnicodeLiteralBuilder()
- _parse_seq = _parse_escape_sequences_raw if is_raw else _parse_escape_sequences
-
- while next_start < size:
- end = next_start
- match = _parse_seq(unicode_value, next_start)
- if match is None:
+ builder = StringEncoding.UnicodeLiteralBuilder()
+ _parse_seq = _parse_escape_sequences_raw if is_raw else _parse_escape_sequences
+
+ while next_start < size:
+ end = next_start
+ match = _parse_seq(unicode_value, next_start)
+ if match is None:
error(_f_string_error_pos(pos, unicode_value, next_start), "Invalid escape sequence")
-
- next_start = match.end()
- part = match.group()
- c = part[0]
- if c == '\\':
- if not is_raw and len(part) > 1:
- _append_escape_sequence('f', builder, part, s)
- else:
- builder.append(part)
- elif c == '{':
- if part == '{{':
- builder.append('{')
+
+ next_start = match.end()
+ part = match.group()
+ c = part[0]
+ if c == '\\':
+ if not is_raw and len(part) > 1:
+ _append_escape_sequence('f', builder, part, s)
else:
- # start of an expression
- if builder.chars:
- values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
- builder = StringEncoding.UnicodeLiteralBuilder()
- next_start, expr_node = p_f_string_expr(s, unicode_value, pos, next_start, is_raw)
+ builder.append(part)
+ elif c == '{':
+ if part == '{{':
+ builder.append('{')
+ else:
+ # start of an expression
+ if builder.chars:
+ values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
+ builder = StringEncoding.UnicodeLiteralBuilder()
+ next_start, expr_node = p_f_string_expr(s, unicode_value, pos, next_start, is_raw)
values.append(expr_node)
- elif c == '}':
- if part == '}}':
- builder.append('}')
- else:
+ elif c == '}':
+ if part == '}}':
+ builder.append('}')
+ else:
error(_f_string_error_pos(pos, unicode_value, end),
"f-string: single '}' is not allowed")
else:
- builder.append(part)
+ builder.append(part)
- if builder.chars:
- values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
+ if builder.chars:
+ values.append(ExprNodes.UnicodeNode(pos, value=builder.getstring()))
return values
-def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw):
+def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw):
# Parses a {}-delimited expression inside an f-string. Returns a FormattedValueNode
# and the index in the string that follows the expression.
i = starting_index
@@ -1157,10 +1157,10 @@ def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw):
i += 1
if i + 2 > size:
pass # error will be reported below
- else:
- conversion_char = unicode_value[i]
- i += 1
- terminal_char = unicode_value[i]
+ else:
+ conversion_char = unicode_value[i]
+ i += 1
+ terminal_char = unicode_value[i]
if terminal_char == ':':
in_triple_quotes = False
@@ -1206,17 +1206,17 @@ def p_f_string_expr(s, unicode_value, pos, starting_index, is_raw):
# the format spec is itself treated like an f-string
if format_spec_str:
- format_spec = ExprNodes.JoinedStrNode(pos, values=p_f_string(s, format_spec_str, pos, is_raw))
+ format_spec = ExprNodes.JoinedStrNode(pos, values=p_f_string(s, format_spec_str, pos, is_raw))
return i + 1, ExprNodes.FormattedValueNode(
- pos, value=expr, conversion_char=conversion_char, format_spec=format_spec)
+ pos, value=expr, conversion_char=conversion_char, format_spec=format_spec)
# since PEP 448:
# list_display ::= "[" [listmaker] "]"
# listmaker ::= (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
# comp_iter ::= comp_for | comp_if
-# comp_for ::= ["async"] "for" expression_list "in" testlist [comp_iter]
+# comp_for ::= ["async"] "for" expression_list "in" testlist [comp_iter]
# comp_if ::= "if" test [comp_iter]
def p_list_maker(s):
@@ -1228,7 +1228,7 @@ def p_list_maker(s):
return ExprNodes.ListNode(pos, args=[])
expr = p_test_or_starred_expr(s)
- if s.sy in ('for', 'async'):
+ if s.sy in ('for', 'async'):
if expr.is_starred:
s.error("iterable unpacking cannot be used in comprehension")
append = ExprNodes.ComprehensionAppendNode(pos, expr=expr)
@@ -1250,7 +1250,7 @@ def p_list_maker(s):
def p_comp_iter(s, body):
- if s.sy in ('for', 'async'):
+ if s.sy in ('for', 'async'):
return p_comp_for(s, body)
elif s.sy == 'if':
return p_comp_if(s, body)
@@ -1259,17 +1259,17 @@ def p_comp_iter(s, body):
return body
def p_comp_for(s, body):
- pos = s.position()
- # [async] for ...
- is_async = False
- if s.sy == 'async':
- is_async = True
- s.next()
-
+ pos = s.position()
+ # [async] for ...
+ is_async = False
+ if s.sy == 'async':
+ is_async = True
+ s.next()
+
# s.sy == 'for'
- s.expect('for')
- kw = p_for_bounds(s, allow_testlist=False, is_async=is_async)
- kw.update(else_clause=None, body=p_comp_iter(s, body), is_async=is_async)
+ s.expect('for')
+ kw = p_for_bounds(s, allow_testlist=False, is_async=is_async)
+ kw.update(else_clause=None, body=p_comp_iter(s, body), is_async=is_async)
return Nodes.ForStatNode(pos, **kw)
def p_comp_if(s, body):
@@ -1337,7 +1337,7 @@ def p_dict_or_set_maker(s):
else:
break
- if s.sy in ('for', 'async'):
+ if s.sy in ('for', 'async'):
# dict/set comprehension
if len(parts) == 1 and isinstance(parts[0], list) and len(parts[0]) == 1:
item = parts[0][0]
@@ -1467,13 +1467,13 @@ def p_testlist_comp(s):
s.next()
exprs = p_test_or_starred_expr_list(s, expr)
return ExprNodes.TupleNode(pos, args = exprs)
- elif s.sy in ('for', 'async'):
+ elif s.sy in ('for', 'async'):
return p_genexp(s, expr)
else:
return expr
def p_genexp(s, expr):
- # s.sy == 'async' | 'for'
+ # s.sy == 'async' | 'for'
loop = p_comp_for(s, Nodes.ExprStatNode(
expr.pos, expr = ExprNodes.YieldExprNode(expr.pos, arg=expr)))
return ExprNodes.GeneratorExpressionNode(expr.pos, loop=loop)
@@ -1504,17 +1504,17 @@ def p_nonlocal_statement(s):
def p_expression_or_assignment(s):
- expr = p_testlist_star_expr(s)
- if s.sy == ':' and (expr.is_name or expr.is_subscript or expr.is_attribute):
- s.next()
- expr.annotation = p_test(s)
- if s.sy == '=' and expr.is_starred:
+ expr = p_testlist_star_expr(s)
+ if s.sy == ':' and (expr.is_name or expr.is_subscript or expr.is_attribute):
+ s.next()
+ expr.annotation = p_test(s)
+ if s.sy == '=' and expr.is_starred:
# This is a common enough error to make when learning Cython to let
# it fail as early as possible and give a very clear error message.
s.error("a starred assignment target must be in a list or tuple"
" - maybe you meant to use an index assignment: var[0] = ...",
- pos=expr.pos)
- expr_list = [expr]
+ pos=expr.pos)
+ expr_list = [expr]
while s.sy == '=':
s.next()
if s.sy == 'yield':
@@ -2044,12 +2044,12 @@ def p_include_statement(s, ctx):
if include_file_path:
s.included_files.append(include_file_name)
with Utils.open_source_file(include_file_path) as f:
- if Options.source_root:
- import os
- rel_path = os.path.relpath(include_file_path, Options.source_root)
- else:
- rel_path = None
- source_desc = FileSourceDescriptor(include_file_path, rel_path)
+ if Options.source_root:
+ import os
+ rel_path = os.path.relpath(include_file_path, Options.source_root)
+ else:
+ rel_path = None
+ source_desc = FileSourceDescriptor(include_file_path, rel_path)
s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
tree = p_statement_list(s2, ctx)
return tree
@@ -2178,14 +2178,14 @@ def p_simple_statement_list(s, ctx, first_statement = 0):
stat = stats[0]
else:
stat = Nodes.StatListNode(pos, stats = stats)
-
- if s.sy not in ('NEWLINE', 'EOF'):
- # provide a better error message for users who accidentally write Cython code in .py files
- if isinstance(stat, Nodes.ExprStatNode):
- if stat.expr.is_name and stat.expr.name == 'cdef':
- s.error("The 'cdef' keyword is only allowed in Cython files (pyx/pxi/pxd)", pos)
+
+ if s.sy not in ('NEWLINE', 'EOF'):
+ # provide a better error message for users who accidentally write Cython code in .py files
+ if isinstance(stat, Nodes.ExprStatNode):
+ if stat.expr.is_name and stat.expr.name == 'cdef':
+ s.error("The 'cdef' keyword is only allowed in Cython files (pyx/pxi/pxd)", pos)
s.expect_newline("Syntax error in simple statement list")
-
+
return stat
def p_compile_time_expr(s):
@@ -2202,10 +2202,10 @@ def p_DEF_statement(s):
name = p_ident(s)
s.expect('=')
expr = p_compile_time_expr(s)
- if s.compile_time_eval:
- value = expr.compile_time_value(denv)
- #print "p_DEF_statement: %s = %r" % (name, value) ###
- denv.declare(name, value)
+ if s.compile_time_eval:
+ value = expr.compile_time_value(denv)
+ #print "p_DEF_statement: %s = %r" % (name, value) ###
+ denv.declare(name, value)
s.expect_newline("Expected a newline", ignore_semicolon=True)
return Nodes.PassStatNode(pos)
@@ -2501,12 +2501,12 @@ def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
error(pos, "Expected an identifier, found '%s'" % s.sy)
if s.systring == 'const':
s.next()
- base_type = p_c_base_type(s, self_flag=self_flag, nonempty=nonempty, templates=templates)
- if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
- # reverse order to avoid having to write "(const int)[:]"
- base_type.base_type_node = Nodes.CConstTypeNode(pos, base_type=base_type.base_type_node)
- return base_type
- return Nodes.CConstTypeNode(pos, base_type=base_type)
+ base_type = p_c_base_type(s, self_flag=self_flag, nonempty=nonempty, templates=templates)
+ if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
+ # reverse order to avoid having to write "(const int)[:]"
+ base_type.base_type_node = Nodes.CConstTypeNode(pos, base_type=base_type.base_type_node)
+ return base_type
+ return Nodes.CConstTypeNode(pos, base_type=base_type)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
@@ -2733,7 +2733,7 @@ special_basic_c_types = cython.declare(dict, {
"ssize_t" : (2, 0),
"size_t" : (0, 0),
"ptrdiff_t" : (2, 0),
- "Py_tss_t" : (1, 0),
+ "Py_tss_t" : (1, 0),
})
sign_and_longness_words = cython.declare(
@@ -3023,13 +3023,13 @@ def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0,
if s.sy == '=':
s.next()
if 'pxd' in ctx.level:
- if s.sy in ['*', '?']:
- # TODO(github/1736): Make this an error for inline declarations.
- default = ExprNodes.NoneNode(pos)
- s.next()
- elif 'inline' in ctx.modifiers:
- default = p_test(s)
- else:
+ if s.sy in ['*', '?']:
+ # TODO(github/1736): Make this an error for inline declarations.
+ default = ExprNodes.NoneNode(pos)
+ s.next()
+ elif 'inline' in ctx.modifiers:
+ default = p_test(s)
+ else:
error(pos, "default values cannot be specified in pxd files, use ? or *")
else:
default = p_test(s)
@@ -3108,13 +3108,13 @@ def p_cdef_extern_block(s, pos, ctx):
ctx.namespace = p_string_literal(s, 'u')[2]
if p_nogil(s):
ctx.nogil = 1
-
- # Use "docstring" as verbatim string to include
- verbatim_include, body = p_suite_with_docstring(s, ctx, True)
-
+
+ # Use "docstring" as verbatim string to include
+ verbatim_include, body = p_suite_with_docstring(s, ctx, True)
+
return Nodes.CDefExternNode(pos,
include_file = include_file,
- verbatim_include = verbatim_include,
+ verbatim_include = verbatim_include,
body = body,
namespace = ctx.namespace)
@@ -3270,7 +3270,7 @@ def p_c_func_or_var_declaration(s, pos, ctx):
cmethod_flag = ctx.level in ('c_class', 'c_class_pxd')
modifiers = p_c_modifiers(s)
base_type = p_c_base_type(s, nonempty = 1, templates = ctx.templates)
- declarator = p_c_declarator(s, ctx(modifiers=modifiers), cmethod_flag = cmethod_flag,
+ declarator = p_c_declarator(s, ctx(modifiers=modifiers), cmethod_flag = cmethod_flag,
assignable = 1, nonempty = 1)
declarator.overridable = ctx.overridable
if s.sy == 'IDENT' and s.systring == 'const' and ctx.level == 'cpp_class':
@@ -3488,16 +3488,16 @@ def p_c_class_definition(s, pos, ctx):
as_name = class_name
objstruct_name = None
typeobj_name = None
- bases = None
+ bases = None
check_size = None
if s.sy == '(':
- positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
- if keyword_args:
- s.error("C classes cannot take keyword bases.")
- bases, _ = p_call_build_packed_args(pos, positional_args, keyword_args)
- if bases is None:
- bases = ExprNodes.TupleNode(pos, args=[])
-
+ positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
+ if keyword_args:
+ s.error("C classes cannot take keyword bases.")
+ bases, _ = p_call_build_packed_args(pos, positional_args, keyword_args)
+ if bases is None:
+ bases = ExprNodes.TupleNode(pos, args=[])
+
if s.sy == '[':
if ctx.visibility not in ('public', 'extern') and not ctx.api:
error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
@@ -3537,7 +3537,7 @@ def p_c_class_definition(s, pos, ctx):
module_name = ".".join(module_path),
class_name = class_name,
as_name = as_name,
- bases = bases,
+ bases = bases,
objstruct_name = objstruct_name,
typeobj_name = typeobj_name,
check_size = check_size,
diff --git a/contrib/tools/cython/Cython/Compiler/Pipeline.py b/contrib/tools/cython/Cython/Compiler/Pipeline.py
index 5194c3e49b..2cb8cb34ce 100644
--- a/contrib/tools/cython/Cython/Compiler/Pipeline.py
+++ b/contrib/tools/cython/Cython/Compiler/Pipeline.py
@@ -144,7 +144,7 @@ def create_pipeline(context, mode, exclude_classes=()):
from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform
from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
- from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
+ from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from .ParseTreeTransforms import CalculateQualifiedNamesTransform
from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
@@ -182,7 +182,7 @@ def create_pipeline(context, mode, exclude_classes=()):
NormalizeTree(context),
PostParse(context),
_specific_post_parse,
- TrackNumpyAttributes(),
+ TrackNumpyAttributes(),
InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context),
AdjustDefByDirectives(context),
@@ -324,40 +324,40 @@ def insert_into_pipeline(pipeline, transform, before=None, after=None):
# Running a pipeline
#
-_pipeline_entry_points = {}
-
-
+_pipeline_entry_points = {}
+
+
def run_pipeline(pipeline, source, printtree=True):
from .Visitor import PrintTree
- exec_ns = globals().copy() if DebugFlags.debug_verbose_pipeline else None
-
- def run(phase, data):
- return phase(data)
+ exec_ns = globals().copy() if DebugFlags.debug_verbose_pipeline else None
+ def run(phase, data):
+ return phase(data)
+
error = None
data = source
try:
try:
for phase in pipeline:
if phase is not None:
- if not printtree and isinstance(phase, PrintTree):
- continue
+ if not printtree and isinstance(phase, PrintTree):
+ continue
if DebugFlags.debug_verbose_pipeline:
t = time()
print("Entering pipeline phase %r" % phase)
- # create a new wrapper for each step to show the name in profiles
- phase_name = getattr(phase, '__name__', type(phase).__name__)
- try:
- run = _pipeline_entry_points[phase_name]
- except KeyError:
- exec("def %s(phase, data): return phase(data)" % phase_name, exec_ns)
- run = _pipeline_entry_points[phase_name] = exec_ns[phase_name]
- data = run(phase, data)
+ # create a new wrapper for each step to show the name in profiles
+ phase_name = getattr(phase, '__name__', type(phase).__name__)
+ try:
+ run = _pipeline_entry_points[phase_name]
+ except KeyError:
+ exec("def %s(phase, data): return phase(data)" % phase_name, exec_ns)
+ run = _pipeline_entry_points[phase_name] = exec_ns[phase_name]
+ data = run(phase, data)
if DebugFlags.debug_verbose_pipeline:
print(" %.3f seconds" % (time() - t))
except CompileError as err:
# err is set
- Errors.report_error(err, use_stack=False)
+ Errors.report_error(err, use_stack=False)
error = err
except InternalError as err:
# Only raise if there was not an earlier error
diff --git a/contrib/tools/cython/Cython/Compiler/PyrexTypes.py b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py
index 3d4931cea6..1a75d40825 100644
--- a/contrib/tools/cython/Cython/Compiler/PyrexTypes.py
+++ b/contrib/tools/cython/Cython/Compiler/PyrexTypes.py
@@ -28,15 +28,15 @@ class BaseType(object):
# List of attribute names of any subtypes
subtypes = []
_empty_declaration = None
- _specialization_name = None
+ _specialization_name = None
default_format_spec = None
def can_coerce_to_pyobject(self, env):
return False
- def can_coerce_from_pyobject(self, env):
- return False
-
+ def can_coerce_from_pyobject(self, env):
+ return False
+
def can_coerce_to_pystring(self, env, format_spec=None):
return False
@@ -52,15 +52,15 @@ class BaseType(object):
return self._empty_declaration
def specialization_name(self):
- if self._specialization_name is None:
- # This is not entirely robust.
- common_subs = (self.empty_declaration_code()
- .replace("unsigned ", "unsigned_")
- .replace("long long", "long_long")
- .replace(" ", "__"))
- self._specialization_name = re.sub(
- '[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs)
- return self._specialization_name
+ if self._specialization_name is None:
+ # This is not entirely robust.
+ common_subs = (self.empty_declaration_code()
+ .replace("unsigned ", "unsigned_")
+ .replace("long long", "long_long")
+ .replace(" ", "__"))
+ self._specialization_name = re.sub(
+ '[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs)
+ return self._specialization_name
def base_declaration_code(self, base_code, entity_code):
if entity_code:
@@ -189,11 +189,11 @@ class PyrexType(BaseType):
# is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type
- # is_pythran_expr boolean Is Pythran expr
- # is_numpy_buffer boolean Is Numpy array buffer
+ # is_pythran_expr boolean Is Pythran expr
+ # is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes
- # default_value string Initial value that can be assigned before first user assignment.
- # declaration_value string The value statically assigned on declaration (if any).
+ # default_value string Initial value that can be assigned before first user assignment.
+ # declaration_value string The value statically assigned on declaration (if any).
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
@@ -251,11 +251,11 @@ class PyrexType(BaseType):
is_buffer = 0
is_ctuple = 0
is_memoryviewslice = 0
- is_pythran_expr = 0
- is_numpy_buffer = 0
+ is_pythran_expr = 0
+ is_numpy_buffer = 0
has_attributes = 0
default_value = ""
- declaration_value = ""
+ declaration_value = ""
def resolve(self):
# If a typedef, returns the base type.
@@ -316,25 +316,25 @@ class PyrexType(BaseType):
def needs_nonecheck(self):
return 0
- def _assign_from_py_code(self, source_code, result_code, error_pos, code,
- from_py_function=None, error_condition=None, extra_args=None):
- args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
- convert_call = "%s(%s%s)" % (
- from_py_function or self.from_py_function,
- source_code,
- args,
- )
- if self.is_enum:
- convert_call = typecast(self, c_long_type, convert_call)
- return '%s = %s; %s' % (
- result_code,
- convert_call,
- code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
-
-
+ def _assign_from_py_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None, extra_args=None):
+ args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
+ convert_call = "%s(%s%s)" % (
+ from_py_function or self.from_py_function,
+ source_code,
+ args,
+ )
+ if self.is_enum:
+ convert_call = typecast(self, c_long_type, convert_call)
+ return '%s = %s; %s' % (
+ result_code,
+ convert_call,
+ code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+
+
def public_decl(base_code, dll_linkage):
if dll_linkage:
- return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA '))
+ return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA '))
else:
return base_code
@@ -509,10 +509,10 @@ class CTypedefType(BaseType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
return self.typedef_base_type.from_py_call_code(
- source_code, result_code, error_pos, code,
- from_py_function or self.from_py_function,
- error_condition or self.error_condition(result_code)
- )
+ source_code, result_code, error_pos, code,
+ from_py_function or self.from_py_function,
+ error_condition or self.error_condition(result_code)
+ )
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
@@ -554,10 +554,10 @@ class CTypedefType(BaseType):
def can_coerce_to_pyobject(self, env):
return self.typedef_base_type.can_coerce_to_pyobject(env)
- def can_coerce_from_pyobject(self, env):
- return self.typedef_base_type.can_coerce_from_pyobject(env)
-
+ def can_coerce_from_pyobject(self, env):
+ return self.typedef_base_type.can_coerce_from_pyobject(env)
+
class MemoryViewSliceType(PyrexType):
is_memoryviewslice = 1
@@ -635,7 +635,7 @@ class MemoryViewSliceType(PyrexType):
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
- #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
+ #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
@@ -738,9 +738,9 @@ class MemoryViewSliceType(PyrexType):
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
- for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')):
+ for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')):
- is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
+ is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_bint_type,
@@ -754,7 +754,7 @@ class MemoryViewSliceType(PyrexType):
defining=1,
cname=is_contig_name)
- entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim)
+ entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim)
return True
@@ -787,21 +787,21 @@ class MemoryViewSliceType(PyrexType):
src = self
- #if not copying and self.writable_needed and not dst.writable_needed:
- # return False
-
- src_dtype, dst_dtype = src.dtype, dst.dtype
- if dst_dtype.is_const:
- # Requesting read-only views is always ok => consider only the non-const base type.
- dst_dtype = dst_dtype.const_base_type
- if src_dtype.is_const:
- # When assigning between read-only views, compare only the non-const base types.
- src_dtype = src_dtype.const_base_type
- elif copying and src_dtype.is_const:
- # Copying by value => ignore const on source.
- src_dtype = src_dtype.const_base_type
-
- if src_dtype != dst_dtype:
+ #if not copying and self.writable_needed and not dst.writable_needed:
+ # return False
+
+ src_dtype, dst_dtype = src.dtype, dst.dtype
+ if dst_dtype.is_const:
+ # Requesting read-only views is always ok => consider only the non-const base type.
+ dst_dtype = dst_dtype.const_base_type
+ if src_dtype.is_const:
+ # When assigning between read-only views, compare only the non-const base types.
+ src_dtype = src_dtype.const_base_type
+ elif copying and src_dtype.is_const:
+ # Copying by value => ignore const on source.
+ src_dtype = src_dtype.const_base_type
+
+ if src_dtype != dst_dtype:
return False
if src.ndim != dst.ndim:
@@ -875,9 +875,9 @@ class MemoryViewSliceType(PyrexType):
def can_coerce_to_pyobject(self, env):
return True
- def can_coerce_from_pyobject(self, env):
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ return True
+
def check_for_null_code(self, cname):
return cname + '.memview'
@@ -919,12 +919,12 @@ class MemoryViewSliceType(PyrexType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- # NOTE: auto-detection of readonly buffers is disabled:
- # writable = self.writable_needed or not self.dtype.is_const
- writable = not self.dtype.is_const
- return self._assign_from_py_code(
- source_code, result_code, error_pos, code, from_py_function, error_condition,
- extra_args=['PyBUF_WRITABLE' if writable else '0'])
+ # NOTE: auto-detection of readonly buffers is disabled:
+ # writable = self.writable_needed or not self.dtype.is_const
+ writable = not self.dtype.is_const
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition,
+ extra_args=['PyBUF_WRITABLE' if writable else '0'])
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
@@ -952,29 +952,29 @@ class MemoryViewSliceType(PyrexType):
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
- self.dtype.create_to_py_utility_code(env)
- to_py_function = self.dtype.to_py_function
-
- from_py_function = None
- if not self.dtype.is_const:
- self.dtype.create_from_py_utility_code(env)
- from_py_function = self.dtype.from_py_function
-
- if not (to_py_function or from_py_function):
+ self.dtype.create_to_py_utility_code(env)
+ to_py_function = self.dtype.to_py_function
+
+ from_py_function = None
+ if not self.dtype.is_const:
+ self.dtype.create_from_py_utility_code(env)
+ from_py_function = self.dtype.from_py_function
+
+ if not (to_py_function or from_py_function):
return "NULL", "NULL"
- if not to_py_function:
+ if not to_py_function:
get_function = "NULL"
- if not from_py_function:
+ if not from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
- to_py_function=to_py_function,
- from_py_function=from_py_function,
- dtype=self.dtype.empty_declaration_code(),
- error_condition=error_condition,
+ to_py_function=to_py_function,
+ from_py_function=from_py_function,
+ dtype=self.dtype.empty_declaration_code(),
+ error_condition=error_condition,
)
utility = TempitaUtilityCode.load_cached(
@@ -1059,14 +1059,14 @@ class BufferType(BaseType):
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
- self.is_numpy_buffer = self.base.name == "ndarray"
+ self.is_numpy_buffer = self.base.name == "ndarray"
def can_coerce_to_pyobject(self,env):
return True
- def can_coerce_from_pyobject(self,env):
- return True
-
+ def can_coerce_from_pyobject(self,env):
+ return True
+
def as_argument_type(self):
return self
@@ -1124,7 +1124,7 @@ class PyObjectType(PyrexType):
name = "object"
is_pyobject = 1
default_value = "0"
- declaration_value = "0"
+ declaration_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
@@ -1139,9 +1139,9 @@ class PyObjectType(PyrexType):
def can_coerce_to_pyobject(self, env):
return True
- def can_coerce_from_pyobject(self, env):
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ return True
+
def default_coerced_ctype(self):
"""The default C type that this Python type coerces to, or None."""
return None
@@ -1344,13 +1344,13 @@ class PyExtensionType(PyObjectType):
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
- # early_init boolean Whether to initialize early (as opposed to during module execution).
+ # early_init boolean Whether to initialize early (as opposed to during module execution).
# defered_declarations [thunk] Used to declare class hierarchies in order
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
is_extension_type = 1
has_attributes = 1
- early_init = 1
+ early_init = 1
objtypedef_cname = None
@@ -1478,9 +1478,9 @@ class CType(PyrexType):
def can_coerce_to_pyobject(self, env):
return self.create_to_py_utility_code(env)
- def can_coerce_from_pyobject(self, env):
- return self.create_from_py_utility_code(env)
-
+ def can_coerce_from_pyobject(self, env):
+ return self.create_from_py_utility_code(env)
+
def error_condition(self, result_code):
conds = []
if self.is_string or self.is_pyunicode_ptr:
@@ -1511,53 +1511,53 @@ class CType(PyrexType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- return self._assign_from_py_code(
- source_code, result_code, error_pos, code, from_py_function, error_condition)
-
-
-
-class PythranExpr(CType):
- # Pythran object of a given type
-
- to_py_function = "__Pyx_pythran_to_python"
- is_pythran_expr = True
- writable = True
- has_attributes = 1
-
- def __init__(self, pythran_type, org_buffer=None):
- self.org_buffer = org_buffer
- self.pythran_type = pythran_type
- self.name = self.pythran_type
- self.cname = self.pythran_type
- self.from_py_function = "from_python<%s>" % (self.pythran_type)
- self.scope = None
-
- def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
- assert not pyrex
- return "%s %s" % (self.cname, entity_code)
-
- def attributes_known(self):
- if self.scope is None:
- from . import Symtab
- # FIXME: fake C scope, might be better represented by a struct or C++ class scope
- self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
- scope.parent_type = self
- scope.directives = {}
- scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True)
- scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True)
-
- return True
-
- def __eq__(self, other):
- return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
-
- def __ne__(self, other):
- return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
-
- def __hash__(self):
- return hash(self.pythran_type)
-
-
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition)
+
+
+
+class PythranExpr(CType):
+ # Pythran object of a given type
+
+ to_py_function = "__Pyx_pythran_to_python"
+ is_pythran_expr = True
+ writable = True
+ has_attributes = 1
+
+ def __init__(self, pythran_type, org_buffer=None):
+ self.org_buffer = org_buffer
+ self.pythran_type = pythran_type
+ self.name = self.pythran_type
+ self.cname = self.pythran_type
+ self.from_py_function = "from_python<%s>" % (self.pythran_type)
+ self.scope = None
+
+ def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
+ assert not pyrex
+ return "%s %s" % (self.cname, entity_code)
+
+ def attributes_known(self):
+ if self.scope is None:
+ from . import Symtab
+ # FIXME: fake C scope, might be better represented by a struct or C++ class scope
+ self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
+ scope.parent_type = self
+ scope.directives = {}
+ scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True)
+ scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True)
+
+ return True
+
+ def __eq__(self, other):
+ return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
+
+ def __ne__(self, other):
+ return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
+
+ def __hash__(self):
+ return hash(self.pythran_type)
+
+
class CConstType(BaseType):
is_const = 1
@@ -1594,20 +1594,20 @@ class CConstType(BaseType):
def can_coerce_to_pyobject(self, env):
return self.const_base_type.can_coerce_to_pyobject(env)
- def can_coerce_from_pyobject(self, env):
- return self.const_base_type.can_coerce_from_pyobject(env)
-
+ def can_coerce_from_pyobject(self, env):
+ return self.const_base_type.can_coerce_from_pyobject(env)
+
def create_to_py_utility_code(self, env):
if self.const_base_type.create_to_py_utility_code(env):
self.to_py_function = self.const_base_type.to_py_function
return True
- def same_as_resolved_type(self, other_type):
- if other_type.is_const:
- return self.const_base_type.same_as_resolved_type(other_type.const_base_type)
- # Accept const LHS <- non-const RHS.
- return self.const_base_type.same_as_resolved_type(other_type)
-
+ def same_as_resolved_type(self, other_type):
+ if other_type.is_const:
+ return self.const_base_type.same_as_resolved_type(other_type.const_base_type)
+ # Accept const LHS <- non-const RHS.
+ return self.const_base_type.same_as_resolved_type(other_type)
+
def __getattr__(self, name):
return getattr(self.const_base_type, name)
@@ -1778,9 +1778,9 @@ class ForbidUseClass:
ForbidUse = ForbidUseClass()
-class CIntLike(object):
- """Mixin for shared behaviour of C integers and enums.
- """
+class CIntLike(object):
+ """Mixin for shared behaviour of C integers and enums.
+ """
to_py_function = None
from_py_function = None
to_pyunicode_utility = None
@@ -1789,27 +1789,27 @@ class CIntLike(object):
def can_coerce_to_pyobject(self, env):
return True
- def can_coerce_from_pyobject(self, env):
- return True
-
- def create_to_py_utility_code(self, env):
- if type(self).to_py_function is None:
- self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntToPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "TO_PY_FUNCTION": self.to_py_function}))
- return True
-
- def create_from_py_utility_code(self, env):
- if type(self).from_py_function is None:
- self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntFromPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "FROM_PY_FUNCTION": self.from_py_function}))
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if type(self).to_py_function is None:
+ self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntToPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": self.to_py_function}))
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if type(self).from_py_function is None:
+ self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntFromPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "FROM_PY_FUNCTION": self.from_py_function}))
+ return True
+
@staticmethod
def _parse_format(format_spec):
padding = ' '
@@ -1853,12 +1853,12 @@ class CIntLike(object):
return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type)
-class CIntType(CIntLike, CNumericType):
-
- is_int = 1
- typedef_flag = 0
- exception_value = -1
+class CIntType(CIntLike, CNumericType):
+ is_int = 1
+ typedef_flag = 0
+ exception_value = -1
+
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
# This assumes sizeof(short) < sizeof(int)
@@ -2171,8 +2171,8 @@ class CComplexType(CNumericType):
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
and src_type.typedef_is_external):
return False
- elif src_type.is_pyobject:
- return True
+ elif src_type.is_pyobject:
+ return True
else:
return super(CComplexType, self).assignable_from(src_type)
@@ -2225,9 +2225,9 @@ class CComplexType(CNumericType):
def can_coerce_to_pyobject(self, env):
return True
- def can_coerce_from_pyobject(self, env):
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ return True
+
def create_to_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c'))
return True
@@ -2274,25 +2274,25 @@ complex_ops = {
}
-class CPyTSSTType(CType):
- #
- # PEP-539 "Py_tss_t" type
- #
-
- declaration_value = "Py_tss_NEEDS_INIT"
-
- def __repr__(self):
- return "<Py_tss_t>"
-
- def declaration_code(self, entity_code,
- for_display=0, dll_linkage=None, pyrex=0):
- if pyrex or for_display:
- base_code = "Py_tss_t"
- else:
- base_code = public_decl("Py_tss_t", dll_linkage)
- return self.base_declaration_code(base_code, entity_code)
-
-
+class CPyTSSTType(CType):
+ #
+ # PEP-539 "Py_tss_t" type
+ #
+
+ declaration_value = "Py_tss_NEEDS_INIT"
+
+ def __repr__(self):
+ return "<Py_tss_t>"
+
+ def declaration_code(self, entity_code,
+ for_display=0, dll_linkage=None, pyrex=0):
+ if pyrex or for_display:
+ base_code = "Py_tss_t"
+ else:
+ base_code = public_decl("Py_tss_t", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+
class CPointerBaseType(CType):
# common base type for pointer/array types
#
@@ -2316,17 +2316,17 @@ class CPointerBaseType(CType):
if base_type.signed == 2:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
- self.from_py_function = "__Pyx_PyObject_As%sSString"
+ self.from_py_function = "__Pyx_PyObject_As%sSString"
elif base_type.signed:
self.to_py_function = "__Pyx_PyObject_FromString"
if self.is_ptr:
- self.from_py_function = "__Pyx_PyObject_As%sString"
+ self.from_py_function = "__Pyx_PyObject_As%sString"
else:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
- self.from_py_function = "__Pyx_PyObject_As%sUString"
- if self.is_ptr:
- self.from_py_function %= '' if self.base_type.is_const else 'Writable'
+ self.from_py_function = "__Pyx_PyObject_As%sUString"
+ if self.is_ptr:
+ self.from_py_function %= '' if self.base_type.is_const else 'Writable'
self.exception_value = "NULL"
elif self.is_pyunicode_ptr and not base_type.is_error:
self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
@@ -2415,21 +2415,21 @@ class CArrayType(CPointerBaseType):
if isinstance(actual, CArrayType):
return self.base_type.deduce_template_params(actual.base_type)
else:
- return {}
+ return {}
def can_coerce_to_pyobject(self, env):
return self.base_type.can_coerce_to_pyobject(env)
- def can_coerce_from_pyobject(self, env):
- return self.base_type.can_coerce_from_pyobject(env)
-
+ def can_coerce_from_pyobject(self, env):
+ return self.base_type.can_coerce_from_pyobject(env)
+
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return self.to_py_function
if not self.base_type.create_to_py_utility_code(env):
return False
- safe_typename = self.base_type.specialization_name()
+ safe_typename = self.base_type.specialization_name()
to_py_function = "__Pyx_carray_to_py_%s" % safe_typename
to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename
@@ -2437,7 +2437,7 @@ class CArrayType(CPointerBaseType):
context = {
'cname': to_py_function,
'to_tuple_cname': to_tuple_function,
- 'base_type': self.base_type,
+ 'base_type': self.base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.to_py", "CConvert.pyx",
@@ -2467,12 +2467,12 @@ class CArrayType(CPointerBaseType):
if not self.base_type.create_from_py_utility_code(env):
return False
- from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name()
+ from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name()
from .UtilityCode import CythonUtilityCode
context = {
'cname': from_py_function,
- 'base_type': self.base_type,
+ 'base_type': self.base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.from_py", "CConvert.pyx",
@@ -2483,7 +2483,7 @@ class CArrayType(CPointerBaseType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- assert not error_condition, '%s: %s' % (error_pos, error_condition)
+ assert not error_condition, '%s: %s' % (error_pos, error_condition)
call_code = "%s(%s, %s, %s)" % (
from_py_function or self.from_py_function,
source_code, result_code, self.size)
@@ -2554,7 +2554,7 @@ class CPtrType(CPointerBaseType):
if isinstance(actual, CPtrType):
return self.base_type.deduce_template_params(actual.base_type)
else:
- return {}
+ return {}
def invalid_value(self):
return "1"
@@ -2683,19 +2683,19 @@ class CFuncType(CType):
",".join(arg_reprs),
except_clause)
- def with_with_gil(self, with_gil):
- if with_gil == self.with_gil:
- return self
- else:
- return CFuncType(
- self.return_type, self.args, self.has_varargs,
- self.exception_value, self.exception_check,
- self.calling_convention, self.nogil,
- with_gil,
- self.is_overridable, self.optional_arg_count,
- self.is_const_method, self.is_static_method,
- self.templates, self.is_strict_signature)
-
+ def with_with_gil(self, with_gil):
+ if with_gil == self.with_gil:
+ return self
+ else:
+ return CFuncType(
+ self.return_type, self.args, self.has_varargs,
+ self.exception_value, self.exception_check,
+ self.calling_convention, self.nogil,
+ with_gil,
+ self.is_overridable, self.optional_arg_count,
+ self.is_const_method, self.is_static_method,
+ self.templates, self.is_strict_signature)
+
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
@@ -2710,11 +2710,11 @@ class CFuncType(CType):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
- def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
- exact_semantics=True):
- # If 'exact_semantics' is false, allow any equivalent C signatures
- # if the Cython semantics are compatible, i.e. the same or wider for 'other_type'.
-
+ def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
+ exact_semantics=True):
+ # If 'exact_semantics' is false, allow any equivalent C signatures
+ # if the Cython semantics are compatible, i.e. the same or wider for 'other_type'.
+
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
@@ -2736,21 +2736,21 @@ class CFuncType(CType):
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
- if as_pxd_definition:
- # A narrowing of the return type declared in the pxd is allowed.
- if not self.return_type.subtype_of_resolved_type(other_type.return_type):
- return 0
- else:
- if not self.return_type.same_as(other_type.return_type):
- return 0
+ if as_pxd_definition:
+ # A narrowing of the return type declared in the pxd is allowed.
+ if not self.return_type.subtype_of_resolved_type(other_type.return_type):
+ return 0
+ else:
+ if not self.return_type.same_as(other_type.return_type):
+ return 0
if not self.same_calling_convention_as(other_type):
return 0
- if exact_semantics:
- if self.exception_check != other_type.exception_check:
- return 0
- if not self._same_exception_value(other_type.exception_value):
- return 0
- elif not self._is_exception_compatible_with(other_type):
+ if exact_semantics:
+ if self.exception_check != other_type.exception_check:
+ return 0
+ if not self._same_exception_value(other_type.exception_value):
+ return 0
+ elif not self._is_exception_compatible_with(other_type):
return 0
return 1
@@ -2802,25 +2802,25 @@ class CFuncType(CType):
return 0
if self.nogil != other_type.nogil:
return 0
- if not self._is_exception_compatible_with(other_type):
+ if not self._is_exception_compatible_with(other_type):
return 0
self.original_sig = other_type.original_sig or other_type
return 1
- def _is_exception_compatible_with(self, other_type):
- # narrower exception checks are ok, but prevent mismatches
- if self.exception_check == '+' and other_type.exception_check != '+':
- # must catch C++ exceptions if we raise them
- return 0
- if not other_type.exception_check or other_type.exception_value is not None:
- # if other does not *always* check exceptions, self must comply
- if not self._same_exception_value(other_type.exception_value):
- return 0
- if self.exception_check and self.exception_check != other_type.exception_check:
- # a redundant exception check doesn't make functions incompatible, but a missing one does
- return 0
- return 1
-
+ def _is_exception_compatible_with(self, other_type):
+ # narrower exception checks are ok, but prevent mismatches
+ if self.exception_check == '+' and other_type.exception_check != '+':
+ # must catch C++ exceptions if we raise them
+ return 0
+ if not other_type.exception_check or other_type.exception_value is not None:
+ # if other does not *always* check exceptions, self must comply
+ if not self._same_exception_value(other_type.exception_value):
+ return 0
+ if self.exception_check and self.exception_check != other_type.exception_check:
+ # a redundant exception check doesn't make functions incompatible, but a missing one does
+ return 0
+ return 1
+
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
@@ -2865,18 +2865,18 @@ class CFuncType(CType):
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
- def same_as_resolved_type(self, other_type, as_cmethod=False):
- return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
+ def same_as_resolved_type(self, other_type, as_cmethod=False):
+ return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
and self.nogil == other_type.nogil
- def pointer_assignable_from_resolved_type(self, rhs_type):
- # Accept compatible exception/nogil declarations for the RHS.
- if rhs_type is error_type:
- return 1
- if not rhs_type.is_cfunction:
- return 0
- return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \
- and not (self.nogil and not rhs_type.nogil)
+ def pointer_assignable_from_resolved_type(self, rhs_type):
+ # Accept compatible exception/nogil declarations for the RHS.
+ if rhs_type is error_type:
+ return 1
+ if not rhs_type.is_cfunction:
+ return 0
+ return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \
+ and not (self.nogil and not rhs_type.nogil)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
@@ -2991,7 +2991,7 @@ class CFuncType(CType):
result = []
permutations = self.get_all_specialized_permutations()
- new_cfunc_entries = []
+ new_cfunc_entries = []
for cname, fused_to_specific in permutations:
new_func_type = self.entry.type.specialize(fused_to_specific)
@@ -3006,16 +3006,16 @@ class CFuncType(CType):
new_func_type.entry = new_entry
result.append(new_func_type)
- new_cfunc_entries.append(new_entry)
-
- cfunc_entries = self.entry.scope.cfunc_entries
- try:
- cindex = cfunc_entries.index(self.entry)
- except ValueError:
- cfunc_entries.extend(new_cfunc_entries)
- else:
- cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+ new_cfunc_entries.append(new_entry)
+ cfunc_entries = self.entry.scope.cfunc_entries
+ try:
+ cindex = cfunc_entries.index(self.entry)
+ except ValueError:
+ cfunc_entries.extend(new_cfunc_entries)
+ else:
+ cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+
self.cached_specialized_types = result
return result
@@ -3228,18 +3228,18 @@ class CFuncTypeArg(BaseType):
or_none = False
accept_none = True
accept_builtin_subtypes = False
- annotation = None
+ annotation = None
subtypes = ['type']
- def __init__(self, name, type, pos, cname=None, annotation=None):
+ def __init__(self, name, type, pos, cname=None, annotation=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
- if annotation is not None:
- self.annotation = annotation
+ if annotation is not None:
+ self.annotation = annotation
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
@@ -3253,7 +3253,7 @@ class CFuncTypeArg(BaseType):
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
-
+
class ToPyStructUtilityCode(object):
requires = None
@@ -3281,8 +3281,8 @@ class ToPyStructUtilityCode(object):
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
- code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" %
- len(self.type.scope.var_entries))
+ code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" %
+ len(self.type.scope.var_entries))
for member in self.type.scope.var_entries:
nameconst_cname = code.get_py_string_const(member.name, identifier=True)
code.putln("%s; if (unlikely(!member)) goto bad;" % (
@@ -3336,10 +3336,10 @@ class CStructOrUnionType(CType):
self._convert_from_py_code = None
self.packed = packed
- def can_coerce_to_pyobject(self, env):
- if self._convert_to_py_code is False:
- return None # tri-state-ish
-
+ def can_coerce_to_pyobject(self, env):
+ if self._convert_to_py_code is False:
+ return None # tri-state-ish
+
if env.outer_scope is None:
return False
@@ -3349,7 +3349,7 @@ class CStructOrUnionType(CType):
safe_union_types = set()
for member in self.scope.var_entries:
member_type = member.type
- if not member_type.can_coerce_to_pyobject(env):
+ if not member_type.can_coerce_to_pyobject(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
@@ -3365,29 +3365,29 @@ class CStructOrUnionType(CType):
self._convert_from_py_code = False
return False
- return True
-
- def create_to_py_utility_code(self, env):
- if not self.can_coerce_to_pyobject(env):
- return False
-
- if self._convert_to_py_code is None:
- for member in self.scope.var_entries:
- member.type.create_to_py_utility_code(env)
+ return True
+
+ def create_to_py_utility_code(self, env):
+ if not self.can_coerce_to_pyobject(env):
+ return False
+
+ if self._convert_to_py_code is None:
+ for member in self.scope.var_entries:
+ member.type.create_to_py_utility_code(env)
forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)
env.use_utility_code(self._convert_to_py_code)
return True
- def can_coerce_from_pyobject(self, env):
- if env.outer_scope is None or self._convert_from_py_code is False:
- return False
- for member in self.scope.var_entries:
- if not member.type.can_coerce_from_pyobject(env):
- return False
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ if env.outer_scope is None or self._convert_from_py_code is False:
+ return False
+ for member in self.scope.var_entries:
+ if not member.type.can_coerce_from_pyobject(env):
+ return False
+ return True
+
def create_from_py_utility_code(self, env):
if env.outer_scope is None:
return False
@@ -3396,11 +3396,11 @@ class CStructOrUnionType(CType):
return None # tri-state-ish
if self._convert_from_py_code is None:
- if not self.scope.var_entries:
- # There are obviously missing fields; don't allow instantiation
- # where absolutely no content is provided.
- return False
-
+ if not self.scope.var_entries:
+ # There are obviously missing fields; don't allow instantiation
+ # where absolutely no content is provided.
+ return False
+
for member in self.scope.var_entries:
if not member.type.create_from_py_utility_code(env):
self.from_py_function = None
@@ -3408,7 +3408,7 @@ class CStructOrUnionType(CType):
return False
context = dict(
- struct_type=self,
+ struct_type=self,
var_entries=self.scope.var_entries,
funcname=self.from_py_function,
)
@@ -3543,34 +3543,34 @@ class CppClassType(CType):
else:
return ''
- def can_coerce_from_pyobject(self, env):
- if self.cname in builtin_cpp_conversions:
- template_count = builtin_cpp_conversions[self.cname]
- for ix, T in enumerate(self.templates or []):
- if ix >= template_count:
- break
- if T.is_pyobject or not T.can_coerce_from_pyobject(env):
- return False
- return True
- elif self.cname in cpp_string_conversions:
- return True
- return False
-
+ def can_coerce_from_pyobject(self, env):
+ if self.cname in builtin_cpp_conversions:
+ template_count = builtin_cpp_conversions[self.cname]
+ for ix, T in enumerate(self.templates or []):
+ if ix >= template_count:
+ break
+ if T.is_pyobject or not T.can_coerce_from_pyobject(env):
+ return False
+ return True
+ elif self.cname in cpp_string_conversions:
+ return True
+ return False
+
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
- context = {}
+ context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if T.is_pyobject or not T.create_from_py_utility_code(env):
return False
tags.append(T.specialization_name())
- context[X[ix]] = T
-
+ context[X[ix]] = T
+
if self.cname in cpp_string_conversions:
cls = 'string'
tags = type_identifier(self),
@@ -3579,42 +3579,42 @@ class CppClassType(CType):
else:
cls = 'arcadia_' + self.cname
cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags))
- context.update({
+ context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
- })
+ })
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
- cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
- context=context, compiler_directives=env.directives))
+ cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
+ context=context, compiler_directives=env.directives))
self.from_py_function = cname
return True
- def can_coerce_to_pyobject(self, env):
- if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
- for ix, T in enumerate(self.templates or []):
- if ix >= builtin_cpp_conversions[self.cname]:
- break
- if T.is_pyobject or not T.can_coerce_to_pyobject(env):
- return False
- return True
-
+ def can_coerce_to_pyobject(self, env):
+ if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
+ for ix, T in enumerate(self.templates or []):
+ if ix >= builtin_cpp_conversions[self.cname]:
+ break
+ if T.is_pyobject or not T.can_coerce_to_pyobject(env):
+ return False
+ return True
+
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
- context = {}
+ context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if not T.create_to_py_utility_code(env):
return False
tags.append(T.specialization_name())
- context[X[ix]] = T
-
+ context[X[ix]] = T
+
if self.cname in cpp_string_conversions:
cls = 'string'
prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode
@@ -3626,15 +3626,15 @@ class CppClassType(CType):
cls = 'arcadia_' + self.cname
prefix = ''
cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags))
- context.update({
+ context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
- })
+ })
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
- cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx",
- context=context, compiler_directives=env.directives))
+ cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx",
+ context=context, compiler_directives=env.directives))
self.to_py_function = cname
return True
@@ -3718,33 +3718,33 @@ class CppClassType(CType):
return specialized
def deduce_template_params(self, actual):
- if actual.is_const:
- actual = actual.const_base_type
- if actual.is_reference:
- actual = actual.ref_base_type
+ if actual.is_const:
+ actual = actual.const_base_type
+ if actual.is_reference:
+ actual = actual.ref_base_type
if self == actual:
return {}
elif actual.is_cpp_class:
- self_template_type = self
- while getattr(self_template_type, 'template_type', None):
- self_template_type = self_template_type.template_type
+ self_template_type = self
+ while getattr(self_template_type, 'template_type', None):
+ self_template_type = self_template_type.template_type
def all_bases(cls):
yield cls
for parent in cls.base_classes:
for base in all_bases(parent):
yield base
for actual_base in all_bases(actual):
- template_type = actual_base
- while getattr(template_type, 'template_type', None):
- template_type = template_type.template_type
- if (self_template_type.empty_declaration_code()
- == template_type.empty_declaration_code()):
- return reduce(
- merge_template_deductions,
- [formal_param.deduce_template_params(actual_param)
- for (formal_param, actual_param)
- in zip(self.templates, actual_base.templates)],
- {})
+ template_type = actual_base
+ while getattr(template_type, 'template_type', None):
+ template_type = template_type.template_type
+ if (self_template_type.empty_declaration_code()
+ == template_type.empty_declaration_code()):
+ return reduce(
+ merge_template_deductions,
+ [formal_param.deduce_template_params(actual_param)
+ for (formal_param, actual_param)
+ in zip(self.templates, actual_base.templates)],
+ {})
else:
return {}
@@ -3781,28 +3781,28 @@ class CppClassType(CType):
return 1
return 0
- def subclass_dist(self, super_type):
- if self.same_as_resolved_type(super_type):
- return 0
- elif not self.base_classes:
- return float('inf')
- else:
- return 1 + min(b.subclass_dist(super_type) for b in self.base_classes)
-
+ def subclass_dist(self, super_type):
+ if self.same_as_resolved_type(super_type):
+ return 0
+ elif not self.base_classes:
+ return float('inf')
+ else:
+ return 1 + min(b.subclass_dist(super_type) for b in self.base_classes)
+
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
return 1
- # This messy logic is needed due to GH Issue #1852.
+ # This messy logic is needed due to GH Issue #1852.
elif (self.cname == other_type.cname and
- (self.template_type and other_type.template_type
- or self.templates
- or other_type.templates)):
+ (self.template_type and other_type.template_type
+ or self.templates
+ or other_type.templates)):
if self.templates == other_type.templates:
return 1
for t1, t2 in zip(self.templates, other_type.templates):
if is_optional_template_param(t1) and is_optional_template_param(t2):
- break
+ break
if not t1.same_as_resolved_type(t2):
return 0
return 1
@@ -3812,10 +3812,10 @@ class CppClassType(CType):
# TODO: handle operator=(...) here?
if other_type is error_type:
return True
- elif other_type.is_cpp_class:
- return other_type.is_subclass(self)
- elif other_type.is_string and self.cname in cpp_string_conversions:
- return True
+ elif other_type.is_cpp_class:
+ return other_type.is_subclass(self)
+ elif other_type.is_string and self.cname in cpp_string_conversions:
+ return True
def attributes_known(self):
return self.scope is not None
@@ -3833,23 +3833,23 @@ class CppClassType(CType):
func_type = func_type.base_type
return func_type.return_type
- def get_constructor(self, pos):
- constructor = self.scope.lookup('<init>')
- if constructor is not None:
- return constructor
-
- # Otherwise: automatically declare no-args default constructor.
- # Make it "nogil" if the base classes allow it.
- nogil = True
- for base in self.base_classes:
- base_constructor = base.scope.lookup('<init>')
- if base_constructor and not base_constructor.type.nogil:
- nogil = False
- break
-
- func_type = CFuncType(self, [], exception_check='+', nogil=nogil)
- return self.scope.declare_cfunction(u'<init>', func_type, pos)
-
+ def get_constructor(self, pos):
+ constructor = self.scope.lookup('<init>')
+ if constructor is not None:
+ return constructor
+
+ # Otherwise: automatically declare no-args default constructor.
+ # Make it "nogil" if the base classes allow it.
+ nogil = True
+ for base in self.base_classes:
+ base_constructor = base.scope.lookup('<init>')
+ if base_constructor and not base_constructor.type.nogil:
+ nogil = False
+ break
+
+ func_type = CFuncType(self, [], exception_check='+', nogil=nogil)
+ return self.scope.declare_cfunction(u'<init>', func_type, pos)
+
def check_nullary_constructor(self, pos, msg="stack allocated"):
constructor = self.scope.lookup(u'<init>')
if constructor is not None and best_match([], constructor.all_alternatives()) is None:
@@ -3903,7 +3903,7 @@ def is_optional_template_param(type):
return isinstance(type, TemplatePlaceholderType) and type.optional
-class CEnumType(CIntLike, CType):
+class CEnumType(CIntLike, CType):
# name string
# cname string or None
# typedef_flag boolean
@@ -3991,12 +3991,12 @@ class CTupleType(CType):
return False
return True
- def can_coerce_from_pyobject(self, env):
- for component in self.components:
- if not component.can_coerce_from_pyobject(env):
- return False
- return True
-
+ def can_coerce_from_pyobject(self, env):
+ for component in self.components:
+ if not component.can_coerce_from_pyobject(env):
+ return False
+ return True
+
def create_to_py_utility_code(self, env):
if self._convert_to_py_code is False:
return None # tri-state-ish
@@ -4175,9 +4175,9 @@ c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
-# PEP-539 "Py_tss_t" type
-c_pytss_t_type = CPyTSSTType()
-
+# PEP-539 "Py_tss_t" type
+c_pytss_t_type = CPyTSSTType()
+
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
@@ -4243,7 +4243,7 @@ modifiers_and_name_to_type = {
#
(1, 0, "void"): c_void_type,
- (1, 0, "Py_tss_t"): c_pytss_t_type,
+ (1, 0, "Py_tss_t"): c_pytss_t_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
@@ -4329,7 +4329,7 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
[pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
{})
if deductions is None:
- errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (func_type, ', '.join(map(str, arg_types)))))
+ errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (func_type, ', '.join(map(str, arg_types)))))
elif len(deductions) < len(func_type.templates):
errors.append((func, "Unable to deduce type parameter %s" % (
", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
@@ -4362,7 +4362,7 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
needed_coercions = {}
for index, (func, func_type) in enumerate(candidates):
- score = [0,0,0,0,0,0,0]
+ score = [0,0,0,0,0,0,0]
for i in range(min(actual_nargs, len(func_type.args))):
src_type = arg_types[i]
dst_type = func_type.args[i].type
@@ -4373,17 +4373,17 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
- if not assignable:
- c_src_type = None
- if src_type.is_pyobject:
- if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
- c_src_type = dst_type.resolve()
- else:
- c_src_type = src_type.default_coerced_ctype()
- elif src_type.is_pythran_expr:
- c_src_type = src_type.org_buffer
-
- if c_src_type is not None:
+ if not assignable:
+ c_src_type = None
+ if src_type.is_pyobject:
+ if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
+ c_src_type = dst_type.resolve()
+ else:
+ c_src_type = src_type.default_coerced_ctype()
+ elif src_type.is_pythran_expr:
+ c_src_type = src_type.org_buffer
+
+ if c_src_type is not None:
assignable = dst_type.assignable_from(c_src_type)
if assignable:
src_type = c_src_type
@@ -4400,13 +4400,13 @@ def best_match(arg_types, functions, pos=None, env=None, args=None):
(src_type.is_float and dst_type.is_float)):
score[2] += abs(dst_type.rank + (not dst_type.signed) -
(src_type.rank + (not src_type.signed))) + 1
- elif dst_type.is_ptr and src_type.is_ptr:
- if dst_type.base_type == c_void_type:
- score[4] += 1
- elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type):
- score[6] += src_type.base_type.subclass_dist(dst_type.base_type)
- else:
- score[5] += 1
+ elif dst_type.is_ptr and src_type.is_ptr:
+ if dst_type.base_type == c_void_type:
+ score[4] += 1
+ elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type):
+ score[6] += src_type.base_type.subclass_dist(dst_type.base_type)
+ else:
+ score[5] += 1
elif not src_type.is_pyobject:
score[1] += 1
else:
@@ -4464,10 +4464,10 @@ def widest_numeric_type(type1, type2):
type1 = type1.ref_base_type
if type2.is_reference:
type2 = type2.ref_base_type
- if type1.is_const:
- type1 = type1.const_base_type
- if type2.is_const:
- type2 = type2.const_base_type
+ if type1.is_const:
+ type1 = type1.const_base_type
+ if type2.is_const:
+ type2 = type2.const_base_type
if type1 == type2:
widest_type = type1
elif type1.is_complex or type2.is_complex:
diff --git a/contrib/tools/cython/Cython/Compiler/Pythran.py b/contrib/tools/cython/Cython/Compiler/Pythran.py
index c02704a918..eaf0f72bd7 100644
--- a/contrib/tools/cython/Cython/Compiler/Pythran.py
+++ b/contrib/tools/cython/Cython/Compiler/Pythran.py
@@ -1,11 +1,11 @@
-# cython: language_level=3
-
-from __future__ import absolute_import
-
-from .PyrexTypes import CType, CTypedefType, CStructOrUnionType
-
-import cython
-
+# cython: language_level=3
+
+from __future__ import absolute_import
+
+from .PyrexTypes import CType, CTypedefType, CStructOrUnionType
+
+import cython
+
try:
import pythran
pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9)
@@ -14,123 +14,123 @@ except ImportError:
pythran = None
pythran_is_pre_0_9 = True
pythran_is_pre_0_9_6 = True
-
+
if pythran_is_pre_0_9_6:
pythran_builtins = '__builtin__'
else:
pythran_builtins = 'builtins'
-# Pythran/Numpy specific operations
-
-def has_np_pythran(env):
+# Pythran/Numpy specific operations
+
+def has_np_pythran(env):
if env is None:
return False
directives = getattr(env, 'directives', None)
return (directives and directives.get('np_pythran', False))
-
-@cython.ccall
-def is_pythran_supported_dtype(type_):
- if isinstance(type_, CTypedefType):
- return is_pythran_supported_type(type_.typedef_base_type)
- return type_.is_numeric
-
-
-def pythran_type(Ty, ptype="ndarray"):
- if Ty.is_buffer:
- ndim,dtype = Ty.ndim, Ty.dtype
- if isinstance(dtype, CStructOrUnionType):
- ctype = dtype.cname
- elif isinstance(dtype, CType):
- ctype = dtype.sign_and_name()
- elif isinstance(dtype, CTypedefType):
- ctype = dtype.typedef_cname
- else:
- raise ValueError("unsupported type %s!" % dtype)
+
+@cython.ccall
+def is_pythran_supported_dtype(type_):
+ if isinstance(type_, CTypedefType):
+ return is_pythran_supported_type(type_.typedef_base_type)
+ return type_.is_numeric
+
+
+def pythran_type(Ty, ptype="ndarray"):
+ if Ty.is_buffer:
+ ndim,dtype = Ty.ndim, Ty.dtype
+ if isinstance(dtype, CStructOrUnionType):
+ ctype = dtype.cname
+ elif isinstance(dtype, CType):
+ ctype = dtype.sign_and_name()
+ elif isinstance(dtype, CTypedefType):
+ ctype = dtype.typedef_cname
+ else:
+ raise ValueError("unsupported type %s!" % dtype)
if pythran_is_pre_0_9:
return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim)
else:
return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim))
- if Ty.is_pythran_expr:
- return Ty.pythran_type
- #if Ty.is_none:
+ if Ty.is_pythran_expr:
+ return Ty.pythran_type
+ #if Ty.is_none:
# return "decltype(pythonic::builtins::None)"
- if Ty.is_numeric:
- return Ty.sign_and_name()
- raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty)))
-
-
-@cython.cfunc
-def type_remove_ref(ty):
- return "typename std::remove_reference<%s>::type" % ty
-
-
-def pythran_binop_type(op, tA, tB):
+ if Ty.is_numeric:
+ return Ty.sign_and_name()
+ raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty)))
+
+
+@cython.cfunc
+def type_remove_ref(ty):
+ return "typename std::remove_reference<%s>::type" % ty
+
+
+def pythran_binop_type(op, tA, tB):
if op == '**':
return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % (
pythran_type(tA), pythran_type(tB))
else:
return "decltype(std::declval<%s>() %s std::declval<%s>())" % (
pythran_type(tA), op, pythran_type(tB))
-
-
-def pythran_unaryop_type(op, type_):
- return "decltype(%sstd::declval<%s>())" % (
- op, pythran_type(type_))
-
-
-@cython.cfunc
-def _index_access(index_code, indices):
- indexing = ",".join([index_code(idx) for idx in indices])
- return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
-
-
-def _index_type_code(index_with_type):
- idx, index_type = index_with_type
- if idx.is_slice:
+
+
+def pythran_unaryop_type(op, type_):
+ return "decltype(%sstd::declval<%s>())" % (
+ op, pythran_type(type_))
+
+
+@cython.cfunc
+def _index_access(index_code, indices):
+ indexing = ",".join([index_code(idx) for idx in indices])
+ return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
+
+
+def _index_type_code(index_with_type):
+ idx, index_type = index_with_type
+ if idx.is_slice:
n = 2 + int(not idx.step.is_none)
return "pythonic::%s::functor::slice{}(%s)" % (
pythran_builtins,
",".join(["0"]*n))
- elif index_type.is_int:
- return "std::declval<%s>()" % index_type.sign_and_name()
- elif index_type.is_pythran_expr:
- return "std::declval<%s>()" % index_type.pythran_type
- raise ValueError("unsupported indexing type %s!" % index_type)
-
-
-def _index_code(idx):
- if idx.is_slice:
- values = idx.start, idx.stop, idx.step
- if idx.step.is_none:
- func = "contiguous_slice"
- values = values[:2]
- else:
- func = "slice"
- return "pythonic::types::%s(%s)" % (
- func, ",".join((v.pythran_result() for v in values)))
- elif idx.type.is_int:
- return to_pythran(idx)
- elif idx.type.is_pythran_expr:
- return idx.pythran_result()
- raise ValueError("unsupported indexing type %s" % idx.type)
-
-
-def pythran_indexing_type(type_, indices):
- return type_remove_ref("decltype(std::declval<%s>()%s)" % (
- pythran_type(type_),
- _index_access(_index_type_code, indices),
- ))
-
-
-def pythran_indexing_code(indices):
- return _index_access(_index_code, indices)
-
+ elif index_type.is_int:
+ return "std::declval<%s>()" % index_type.sign_and_name()
+ elif index_type.is_pythran_expr:
+ return "std::declval<%s>()" % index_type.pythran_type
+ raise ValueError("unsupported indexing type %s!" % index_type)
+
+
+def _index_code(idx):
+ if idx.is_slice:
+ values = idx.start, idx.stop, idx.step
+ if idx.step.is_none:
+ func = "contiguous_slice"
+ values = values[:2]
+ else:
+ func = "slice"
+ return "pythonic::types::%s(%s)" % (
+ func, ",".join((v.pythran_result() for v in values)))
+ elif idx.type.is_int:
+ return to_pythran(idx)
+ elif idx.type.is_pythran_expr:
+ return idx.pythran_result()
+ raise ValueError("unsupported indexing type %s" % idx.type)
+
+
+def pythran_indexing_type(type_, indices):
+ return type_remove_ref("decltype(std::declval<%s>()%s)" % (
+ pythran_type(type_),
+ _index_access(_index_type_code, indices),
+ ))
+
+
+def pythran_indexing_code(indices):
+ return _index_access(_index_code, indices)
+
def np_func_to_list(func):
if not func.is_numpy_attribute:
return []
return np_func_to_list(func.obj) + [func.attribute]
-
+
if pythran is None:
def pythran_is_numpy_func_supported(name):
return False
@@ -149,79 +149,79 @@ def pythran_functor(func):
submodules = "::".join(func[:-1] + ["functor"])
return "pythonic::numpy::%s::%s" % (submodules, func[-1])
-def pythran_func_type(func, args):
- args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
+def pythran_func_type(func, args):
+ args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
return "decltype(%s{}(%s))" % (pythran_functor(func), args)
-
-
-@cython.ccall
-def to_pythran(op, ptype=None):
- op_type = op.type
- if op_type.is_int:
- # Make sure that integer literals always have exactly the type that the templates expect.
- return op_type.cast_code(op.result())
- if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
- return op.result()
- if op.is_none:
+
+
+@cython.ccall
+def to_pythran(op, ptype=None):
+ op_type = op.type
+ if op_type.is_int:
+ # Make sure that integer literals always have exactly the type that the templates expect.
+ return op_type.cast_code(op.result())
+ if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
+ return op.result()
+ if op.is_none:
return "pythonic::%s::None" % pythran_builtins
- if ptype is None:
- ptype = pythran_type(op_type)
-
- assert op.type.is_pyobject
- return "from_python<%s>(%s)" % (ptype, op.py_result())
-
-
-@cython.cfunc
-def is_type(type_, types):
- for attr in types:
- if getattr(type_, attr, False):
- return True
- return False
-
-
-def is_pythran_supported_node_or_none(node):
- return node.is_none or is_pythran_supported_type(node.type)
-
-
-@cython.ccall
-def is_pythran_supported_type(type_):
- pythran_supported = (
- "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
- return is_type(type_, pythran_supported) or is_pythran_expr(type_)
-
-
-def is_pythran_supported_operation_type(type_):
- pythran_supported = (
- "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
- return is_type(type_,pythran_supported) or is_pythran_expr(type_)
-
-
-@cython.ccall
-def is_pythran_expr(type_):
- return type_.is_pythran_expr
-
-
-def is_pythran_buffer(type_):
- return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and
- type_.mode in ("c", "strided") and not type_.cast)
-
+ if ptype is None:
+ ptype = pythran_type(op_type)
+
+ assert op.type.is_pyobject
+ return "from_python<%s>(%s)" % (ptype, op.py_result())
+
+
+@cython.cfunc
+def is_type(type_, types):
+ for attr in types:
+ if getattr(type_, attr, False):
+ return True
+ return False
+
+
+def is_pythran_supported_node_or_none(node):
+ return node.is_none or is_pythran_supported_type(node.type)
+
+
+@cython.ccall
+def is_pythran_supported_type(type_):
+ pythran_supported = (
+ "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
+ return is_type(type_, pythran_supported) or is_pythran_expr(type_)
+
+
+def is_pythran_supported_operation_type(type_):
+ pythran_supported = (
+ "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
+ return is_type(type_,pythran_supported) or is_pythran_expr(type_)
+
+
+@cython.ccall
+def is_pythran_expr(type_):
+ return type_.is_pythran_expr
+
+
+def is_pythran_buffer(type_):
+ return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and
+ type_.mode in ("c", "strided") and not type_.cast)
+
def pythran_get_func_include_file(func):
func = np_func_to_list(func)
return "pythonic/numpy/%s.hpp" % "/".join(func)
-
-def include_pythran_generic(env):
- # Generic files
- env.add_include_file("pythonic/core.hpp")
- env.add_include_file("pythonic/python/core.hpp")
- env.add_include_file("pythonic/types/bool.hpp")
- env.add_include_file("pythonic/types/ndarray.hpp")
+
+def include_pythran_generic(env):
+ # Generic files
+ env.add_include_file("pythonic/core.hpp")
+ env.add_include_file("pythonic/python/core.hpp")
+ env.add_include_file("pythonic/types/bool.hpp")
+ env.add_include_file("pythonic/types/ndarray.hpp")
env.add_include_file("pythonic/numpy/power.hpp")
env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins)
- env.add_include_file("<new>") # for placement new
-
- for i in (8, 16, 32, 64):
- env.add_include_file("pythonic/types/uint%d.hpp" % i)
- env.add_include_file("pythonic/types/int%d.hpp" % i)
- for t in ("float", "float32", "float64", "set", "slice", "tuple", "int",
+ env.add_include_file("<new>") # for placement new
+
+ for i in (8, 16, 32, 64):
+ env.add_include_file("pythonic/types/uint%d.hpp" % i)
+ env.add_include_file("pythonic/types/int%d.hpp" % i)
+ for t in ("float", "float32", "float64", "set", "slice", "tuple", "int",
"complex", "complex64", "complex128"):
- env.add_include_file("pythonic/types/%s.hpp" % t)
+ env.add_include_file("pythonic/types/%s.hpp" % t)
diff --git a/contrib/tools/cython/Cython/Compiler/Scanning.py b/contrib/tools/cython/Cython/Compiler/Scanning.py
index c721bba69b..421ca64c86 100644
--- a/contrib/tools/cython/Cython/Compiler/Scanning.py
+++ b/contrib/tools/cython/Cython/Compiler/Scanning.py
@@ -1,4 +1,4 @@
-# cython: infer_types=True, language_level=3, py2_import=True, auto_pickle=False
+# cython: infer_types=True, language_level=3, py2_import=True, auto_pickle=False
#
# Cython Scanner
#
@@ -63,13 +63,13 @@ class Method(object):
# self.kwargs is almost always unused => avoid call overhead
return method(text, **self.kwargs) if self.kwargs is not None else method(text)
- def __copy__(self):
- return self # immutable, no need to copy
-
- def __deepcopy__(self, memo):
- return self # immutable, no need to copy
-
+ def __copy__(self):
+ return self # immutable, no need to copy
+ def __deepcopy__(self, memo):
+ return self # immutable, no need to copy
+
+
#------------------------------------------------------------------
class CompileTimeScope(object):
@@ -170,7 +170,7 @@ class SourceDescriptor(object):
if self._escaped_description is None:
esc_desc = \
self.get_description().encode('ASCII', 'replace').decode("ASCII")
- # Use forward slashes on Windows since these paths
+ # Use forward slashes on Windows since these paths
# will be used in the #line directives in the C/C++ files.
self._escaped_description = esc_desc.replace('\\', '/')
return self._escaped_description
@@ -196,13 +196,13 @@ class SourceDescriptor(object):
except AttributeError:
return False
- def __copy__(self):
- return self # immutable, no need to copy
-
- def __deepcopy__(self, memo):
- return self # immutable, no need to copy
-
+ def __copy__(self):
+ return self # immutable, no need to copy
+ def __deepcopy__(self, memo):
+ return self # immutable, no need to copy
+
+
class FileSourceDescriptor(SourceDescriptor):
"""
Represents a code source. A code source is a more generic abstraction
@@ -215,9 +215,9 @@ class FileSourceDescriptor(SourceDescriptor):
filename = Utils.decode_filename(filename)
self.path_description = path_description or filename
self.filename = filename
- # Prefer relative paths to current directory (which is most likely the project root) over absolute paths.
- workdir = os.path.abspath('.') + os.sep
- self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename
+ # Prefer relative paths to current directory (which is most likely the project root) over absolute paths.
+ workdir = os.path.abspath('.') + os.sep
+ self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename
self.set_file_type_from_name(filename)
self._cmp_name = filename
self._lines = {}
@@ -245,8 +245,8 @@ class FileSourceDescriptor(SourceDescriptor):
return lines
def get_description(self):
- # Dump path_description, it's already arcadia root relative (required for proper file matching in coverage)
- return self.path_description
+ # Dump path_description, it's already arcadia root relative (required for proper file matching in coverage)
+ return self.path_description
try:
return os.path.relpath(self.path_description)
except ValueError:
@@ -261,7 +261,7 @@ class FileSourceDescriptor(SourceDescriptor):
return path
def get_filenametable_entry(self):
- return self.file_path
+ return self.file_path
def __eq__(self, other):
return isinstance(other, FileSourceDescriptor) and self.filename == other.filename
diff --git a/contrib/tools/cython/Cython/Compiler/StringEncoding.py b/contrib/tools/cython/Cython/Compiler/StringEncoding.py
index c37e8aab79..617d7502de 100644
--- a/contrib/tools/cython/Cython/Compiler/StringEncoding.py
+++ b/contrib/tools/cython/Cython/Compiler/StringEncoding.py
@@ -87,7 +87,7 @@ class BytesLiteralBuilder(object):
def getstrings(self):
return (self.getstring(), None)
-
+
class StrLiteralBuilder(object):
"""Assemble both a bytes and a unicode representation of a string.
"""
@@ -219,14 +219,14 @@ def bytes_literal(s, encoding):
return s
-def encoded_string(s, encoding):
- assert isinstance(s, (_unicode, bytes))
- s = EncodedString(s)
- if encoding is not None:
- s.encoding = encoding
- return s
-
-
+def encoded_string(s, encoding):
+ assert isinstance(s, (_unicode, bytes))
+ s = EncodedString(s)
+ if encoding is not None:
+ s.encoding = encoding
+ return s
+
+
char_from_escape_sequence = {
r'\a' : u'\a',
r'\b' : u'\b',
diff --git a/contrib/tools/cython/Cython/Compiler/Symtab.py b/contrib/tools/cython/Cython/Compiler/Symtab.py
index 7361a55aea..868f96ebf7 100644
--- a/contrib/tools/cython/Cython/Compiler/Symtab.py
+++ b/contrib/tools/cython/Cython/Compiler/Symtab.py
@@ -4,9 +4,9 @@
from __future__ import absolute_import
-import re
+import re
import copy
-import operator
+import operator
try:
import __builtin__ as builtins
@@ -18,9 +18,9 @@ from .StringEncoding import EncodedString
from . import Options, Naming
from . import PyrexTypes
from .PyrexTypes import py_object_type, unspecified_type
-from .TypeSlots import (
- pyfunction_signature, pymethod_signature, richcmp_special_methods,
- get_special_method_signature, get_property_accessor_signature)
+from .TypeSlots import (
+ pyfunction_signature, pymethod_signature, richcmp_special_methods,
+ get_special_method_signature, get_property_accessor_signature)
from . import Future
from . import Code
@@ -36,13 +36,13 @@ iso_c99_keywords = set(
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
- if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
- or cname in ('__weakref__', '__dict__')))
- or cname in iso_c99_keywords):
+ if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
+ or cname in ('__weakref__', '__dict__')))
+ or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
-
+
class BufferAux(object):
writable_needed = False
@@ -61,7 +61,7 @@ class Entry(object):
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
- # annotation ExprNode PEP 484/526 annotation
+ # annotation ExprNode PEP 484/526 annotation
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
@@ -91,7 +91,7 @@ class Entry(object):
# is_arg boolean Is the arg of a method
# is_local boolean Is a local variable
# in_closure boolean Is referenced in an inner scope
- # in_subscope boolean Belongs to a generator expression scope
+ # in_subscope boolean Belongs to a generator expression scope
# is_readonly boolean Can't be assigned to
# func_cname string C func implementing Python func
# func_modifiers [string] C function modifiers ('inline')
@@ -123,7 +123,7 @@ class Entry(object):
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
- # Ideally this should not be necessary.
+ # Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
@@ -140,7 +140,7 @@ class Entry(object):
inline_func_in_pxd = False
borrowed = 0
init = ""
- annotation = None
+ annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
@@ -168,7 +168,7 @@ class Entry(object):
is_local = 0
in_closure = 0
from_closure = 0
- in_subscope = 0
+ in_subscope = 0
is_declared_generic = 0
is_readonly = 0
pyfunc_cname = None
@@ -219,12 +219,12 @@ class Entry(object):
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
- def already_declared_here(self):
- error(self.pos, "Previous declaration is here")
-
+ def already_declared_here(self):
+ error(self.pos, "Previous declaration is here")
+
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
- self.already_declared_here()
+ self.already_declared_here()
def all_alternatives(self):
return [self] + self.overloaded_alternatives
@@ -308,7 +308,7 @@ class Scope(object):
is_py_class_scope = 0
is_c_class_scope = 0
is_closure_scope = 0
- is_genexpr_scope = 0
+ is_genexpr_scope = 0
is_passthrough = 0
is_cpp_class_scope = 0
is_property_scope = 0
@@ -318,7 +318,7 @@ class Scope(object):
in_cinclude = 0
nogil = 0
fused_to_specific = None
- return_type = None
+ return_type = None
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
@@ -335,7 +335,7 @@ class Scope(object):
self.qualified_name = EncodedString(name)
self.scope_prefix = mangled_name
self.entries = {}
- self.subscopes = set()
+ self.subscopes = set()
self.const_entries = []
self.type_entries = []
self.sue_entries = []
@@ -430,12 +430,12 @@ class Scope(object):
""" Return the module-level scope containing this scope. """
return self.outer_scope.builtin_scope()
- def iter_local_scopes(self):
- yield self
- if self.subscopes:
- for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')):
- yield scope
-
+ def iter_local_scopes(self):
+ yield self
+ if self.subscopes:
+ for scope in sorted(self.subscopes, key=operator.attrgetter('scope_prefix')):
+ yield scope
+
def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0, create_wrapper = 0):
# Create new entry, and add to dictionary if
# name is not None. Reports a warning if already
@@ -447,33 +447,33 @@ class Scope(object):
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
- old_entry = entries[name]
-
- # Reject redeclared C++ functions only if they have the same type signature.
- cpp_override_allowed = False
- if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
- for alt_entry in old_entry.all_alternatives():
- if type == alt_entry.type:
- if name == '<init>' and not type.args:
- # Cython pre-declares the no-args constructor - allow later user definitions.
- cpp_override_allowed = True
- break
- else:
- cpp_override_allowed = True
-
- if cpp_override_allowed:
- # C++ function/method overrides with different signatures are ok.
- pass
- elif self.is_cpp_class_scope and entries[name].is_inherited:
- # Likewise ignore inherited classes.
+ old_entry = entries[name]
+
+ # Reject redeclared C++ functions only if they have the same type signature.
+ cpp_override_allowed = False
+ if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
+ for alt_entry in old_entry.all_alternatives():
+ if type == alt_entry.type:
+ if name == '<init>' and not type.args:
+ # Cython pre-declares the no-args constructor - allow later user definitions.
+ cpp_override_allowed = True
+ break
+ else:
+ cpp_override_allowed = True
+
+ if cpp_override_allowed:
+ # C++ function/method overrides with different signatures are ok.
pass
+ elif self.is_cpp_class_scope and entries[name].is_inherited:
+ # Likewise ignore inherited classes.
+ pass
elif visibility == 'extern':
- # Silenced outside of "cdef extern" blocks, until we have a safe way to
- # prevent pxd-defined cpdef functions from ending up here.
- warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
+ # Silenced outside of "cdef extern" blocks, until we have a safe way to
+ # prevent pxd-defined cpdef functions from ending up here.
+ warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
- entries[name].already_declared_here()
+ entries[name].already_declared_here()
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
@@ -605,7 +605,7 @@ class Scope(object):
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
- entry.already_declared_here()
+ entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
@@ -616,13 +616,13 @@ class Scope(object):
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
- entry.already_declared_here()
+ entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
- entry.already_declared_here()
+ entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
@@ -632,7 +632,7 @@ class Scope(object):
error(pos, "Cannot inherit from incomplete type")
else:
declare_inherited_attributes(entry, base_class.base_classes)
- entry.type.scope.declare_inherited_cpp_attributes(base_class)
+ entry.type.scope.declare_inherited_cpp_attributes(base_class)
if scope:
declare_inherited_attributes(entry, base_classes)
scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
@@ -773,10 +773,10 @@ class Scope(object):
if overridable != entry.is_overridable:
warning(pos, "Function '%s' previously declared as '%s'" % (
name, 'cpdef' if overridable else 'cdef'), 1)
- if entry.type.same_as(type):
- # Fix with_gil vs nogil.
- entry.type = entry.type.with_with_gil(type.with_gil)
- else:
+ if entry.type.same_as(type):
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
+ else:
if visibility == 'extern' and entry.visibility == 'extern':
can_override = False
if self.is_cpp():
@@ -796,10 +796,10 @@ class Scope(object):
else:
warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
- elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
- # TODO: check that this was done by a signature optimisation and not a user error.
- #warning(pos, "Function signature does not match previous declaration", 1)
- entry.type = type
+ elif not in_pxd and entry.defined_in_pxd and type.compatible_signature_with(entry.type):
+ # TODO: check that this was done by a signature optimisation and not a user error.
+ #warning(pos, "Function signature does not match previous declaration", 1)
+ entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
@@ -830,23 +830,23 @@ class Scope(object):
type.entry = entry
return entry
- def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
+ def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.is_cfunction = 1
if modifiers:
entry.func_modifiers = modifiers
- if inherited or type.is_fused:
- self.cfunc_entries.append(entry)
- else:
- # For backwards compatibility reasons, we must keep all non-fused methods
- # before all fused methods, but separately for each type.
- i = len(self.cfunc_entries)
- for cfunc_entry in reversed(self.cfunc_entries):
- if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
- break
- i -= 1
- self.cfunc_entries.insert(i, entry)
+ if inherited or type.is_fused:
+ self.cfunc_entries.append(entry)
+ else:
+ # For backwards compatibility reasons, we must keep all non-fused methods
+ # before all fused methods, but separately for each type.
+ i = len(self.cfunc_entries)
+ for cfunc_entry in reversed(self.cfunc_entries):
+ if cfunc_entry.is_inherited or not cfunc_entry.type.is_fused:
+ break
+ i -= 1
+ self.cfunc_entries.insert(i, entry)
return entry
def find(self, name, pos):
@@ -946,19 +946,19 @@ class Scope(object):
self.global_scope().use_entry_utility_code(entry)
def defines_any(self, names):
- # Test whether any of the given names are defined in this scope.
+ # Test whether any of the given names are defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
- def defines_any_special(self, names):
- # Test whether any of the given names are defined as special methods in this scope.
- for name in names:
- if name in self.entries and self.entries[name].is_special:
- return 1
- return 0
-
+ def defines_any_special(self, names):
+ # Test whether any of the given names are defined as special methods in this scope.
+ for name in names:
+ if name in self.entries and self.entries[name].is_special:
+ return 1
+ return 0
+
def infer_types(self):
from .TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
@@ -1114,8 +1114,8 @@ class ModuleScope(Scope):
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
- # c_includes {key: IncludeCode} C headers or verbatim code to be generated
- # See process_include() for more documentation
+ # c_includes {key: IncludeCode} C headers or verbatim code to be generated
+ # See process_include() for more documentation
# string_to_entry {string : Entry} Map string const to entry
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
@@ -1158,7 +1158,7 @@ class ModuleScope(Scope):
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
- self.c_includes = {}
+ self.c_includes = {}
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
@@ -1169,10 +1169,10 @@ class ModuleScope(Scope):
self.undeclared_cached_builtins = []
self.namespace_cname = self.module_cname
self._cached_tuple_types = {}
- for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
- '__spec__', '__loader__', '__package__', '__cached__']:
+ for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
+ '__spec__', '__loader__', '__package__', '__cached__']:
self.declare_var(EncodedString(var_name), py_object_type, None)
- self.process_include(Code.IncludeCode("Python.h", initial=True))
+ self.process_include(Code.IncludeCode("Python.h", initial=True))
def qualifying_scope(self):
return self.parent_module
@@ -1299,58 +1299,58 @@ class ModuleScope(Scope):
module = module.lookup_submodule(submodule)
return module
- def add_include_file(self, filename, verbatim_include=None, late=False):
- """
- Add `filename` as include file. Add `verbatim_include` as
- verbatim text in the C file.
- Both `filename` and `verbatim_include` can be `None` or empty.
- """
- inc = Code.IncludeCode(filename, verbatim_include, late=late)
- self.process_include(inc)
-
- def process_include(self, inc):
- """
- Add `inc`, which is an instance of `IncludeCode`, to this
- `ModuleScope`. This either adds a new element to the
- `c_includes` dict or it updates an existing entry.
-
- In detail: the values of the dict `self.c_includes` are
- instances of `IncludeCode` containing the code to be put in the
- generated C file. The keys of the dict are needed to ensure
- uniqueness in two ways: if an include file is specified in
- multiple "cdef extern" blocks, only one `#include` statement is
- generated. Second, the same include might occur multiple times
- if we find it through multiple "cimport" paths. So we use the
- generated code (of the form `#include "header.h"`) as dict key.
-
- If verbatim code does not belong to any include file (i.e. it
- was put in a `cdef extern from *` block), then we use a unique
- dict key: namely, the `sortkey()`.
-
- One `IncludeCode` object can contain multiple pieces of C code:
- one optional "main piece" for the include file and several other
- pieces for the verbatim code. The `IncludeCode.dict_update`
- method merges the pieces of two different `IncludeCode` objects
- if needed.
- """
- key = inc.mainpiece()
- if key is None:
- key = inc.sortkey()
- inc.dict_update(self.c_includes, key)
- inc = self.c_includes[key]
-
+ def add_include_file(self, filename, verbatim_include=None, late=False):
+ """
+ Add `filename` as include file. Add `verbatim_include` as
+ verbatim text in the C file.
+ Both `filename` and `verbatim_include` can be `None` or empty.
+ """
+ inc = Code.IncludeCode(filename, verbatim_include, late=late)
+ self.process_include(inc)
+
+ def process_include(self, inc):
+ """
+ Add `inc`, which is an instance of `IncludeCode`, to this
+ `ModuleScope`. This either adds a new element to the
+ `c_includes` dict or it updates an existing entry.
+
+ In detail: the values of the dict `self.c_includes` are
+ instances of `IncludeCode` containing the code to be put in the
+ generated C file. The keys of the dict are needed to ensure
+ uniqueness in two ways: if an include file is specified in
+ multiple "cdef extern" blocks, only one `#include` statement is
+ generated. Second, the same include might occur multiple times
+ if we find it through multiple "cimport" paths. So we use the
+ generated code (of the form `#include "header.h"`) as dict key.
+
+ If verbatim code does not belong to any include file (i.e. it
+ was put in a `cdef extern from *` block), then we use a unique
+ dict key: namely, the `sortkey()`.
+
+ One `IncludeCode` object can contain multiple pieces of C code:
+ one optional "main piece" for the include file and several other
+ pieces for the verbatim code. The `IncludeCode.dict_update`
+ method merges the pieces of two different `IncludeCode` objects
+ if needed.
+ """
+ key = inc.mainpiece()
+ if key is None:
+ key = inc.sortkey()
+ inc.dict_update(self.c_includes, key)
+ inc = self.c_includes[key]
+
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
- for inc in scope.c_includes.values():
- self.process_include(inc)
+ for inc in scope.c_includes.values():
+ self.process_include(inc)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
def add_imported_entry(self, name, entry, pos):
- if entry.is_pyglobal:
- # Allow cimports to follow imports.
- entry.is_variable = True
+ if entry.is_pyglobal:
+ # Allow cimports to follow imports.
+ entry.is_variable = True
if entry not in self.entries:
self.entries[name] = entry
else:
@@ -1376,7 +1376,7 @@ class ModuleScope(Scope):
return entry
else:
entry = self.declare_var(name, py_object_type, pos)
- entry.is_variable = 0
+ entry.is_variable = 0
entry.as_module = scope
self.add_imported_module(scope)
return entry
@@ -1428,8 +1428,8 @@ class ModuleScope(Scope):
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if is_cdef:
entry.is_cglobal = 1
- if entry.type.declaration_value:
- entry.init = entry.type.declaration_value
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
@@ -1440,9 +1440,9 @@ class ModuleScope(Scope):
def declare_cfunction(self, name, type, pos,
cname=None, visibility='private', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
- if not defining and 'inline' in modifiers:
- # TODO(github/1736): Make this an error.
- warning(pos, "Declarations should not be declared inline.", 1)
+ if not defining and 'inline' in modifiers:
+ # TODO(github/1736): Make this an error.
+ warning(pos, "Declarations should not be declared inline.", 1)
# Add an entry for a C function.
if not cname:
if visibility == 'extern' or (visibility == 'public' and defining):
@@ -1763,8 +1763,8 @@ class LocalScope(Scope):
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
- if entry.type.declaration_value:
- entry.init = entry.type.declaration_value
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
entry.is_local = 1
entry.in_with_gil_block = self._in_with_gil_block
@@ -1784,7 +1784,7 @@ class LocalScope(Scope):
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
- orig_entry.already_declared_here()
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None or not entry.from_closure:
@@ -1795,10 +1795,10 @@ class LocalScope(Scope):
# Return None if not found.
entry = Scope.lookup(self, name)
if entry is not None:
- entry_scope = entry.scope
- while entry_scope.is_genexpr_scope:
- entry_scope = entry_scope.outer_scope
- if entry_scope is not self and entry_scope.is_closure_scope:
+ entry_scope = entry.scope
+ while entry_scope.is_genexpr_scope:
+ entry_scope = entry_scope.outer_scope
+ if entry_scope is not self and entry_scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError("lookup() after scope class created.")
# The actual c fragment for the different scopes differs
@@ -1811,19 +1811,19 @@ class LocalScope(Scope):
return entry
def mangle_closure_cnames(self, outer_scope_cname):
- for scope in self.iter_local_scopes():
- for entry in scope.entries.values():
- if entry.from_closure:
- cname = entry.outer_entry.cname
- if self.is_passthrough:
- entry.cname = cname
- else:
- if cname.startswith(Naming.cur_scope_cname):
- cname = cname[len(Naming.cur_scope_cname)+2:]
- entry.cname = "%s->%s" % (outer_scope_cname, cname)
- elif entry.in_closure:
- entry.original_cname = entry.cname
- entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
+ for scope in self.iter_local_scopes():
+ for entry in scope.entries.values():
+ if entry.from_closure:
+ cname = entry.outer_entry.cname
+ if self.is_passthrough:
+ entry.cname = cname
+ else:
+ if cname.startswith(Naming.cur_scope_cname):
+ cname = cname[len(Naming.cur_scope_cname)+2:]
+ entry.cname = "%s->%s" % (outer_scope_cname, cname)
+ elif entry.in_closure:
+ entry.original_cname = entry.cname
+ entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
class GeneratorExpressionScope(Scope):
@@ -1831,25 +1831,25 @@ class GeneratorExpressionScope(Scope):
to generators, these can be easily inlined in some cases, so all
we really need is a scope that holds the loop variable(s).
"""
- is_genexpr_scope = True
-
+ is_genexpr_scope = True
+
def __init__(self, outer_scope):
- parent_scope = outer_scope
- # TODO: also ignore class scopes?
- while parent_scope.is_genexpr_scope:
- parent_scope = parent_scope.parent_scope
- name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
- Scope.__init__(self, name, outer_scope, parent_scope)
+ parent_scope = outer_scope
+ # TODO: also ignore class scopes?
+ while parent_scope.is_genexpr_scope:
+ parent_scope = parent_scope.parent_scope
+ name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
+ Scope.__init__(self, name, outer_scope, parent_scope)
self.directives = outer_scope.directives
self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
- # Class/ExtType scopes are filled at class creation time, i.e. from the
- # module init function or surrounding function.
- while outer_scope.is_genexpr_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
- outer_scope = outer_scope.outer_scope
- self.var_entries = outer_scope.var_entries # keep declarations outside
- outer_scope.subscopes.add(self)
-
+ # Class/ExtType scopes are filled at class creation time, i.e. from the
+ # module init function or surrounding function.
+ while outer_scope.is_genexpr_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
+ outer_scope = outer_scope.outer_scope
+ self.var_entries = outer_scope.var_entries # keep declarations outside
+ outer_scope.subscopes.add(self)
+
def mangle(self, prefix, name):
return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
@@ -1865,12 +1865,12 @@ class GeneratorExpressionScope(Scope):
# this scope must hold its name exclusively
cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
entry = self.declare(name, cname, type, pos, visibility)
- entry.is_variable = True
- if self.parent_scope.is_module_scope:
- entry.is_cglobal = True
- else:
- entry.is_local = True
- entry.in_subscope = True
+ entry.is_variable = True
+ if self.parent_scope.is_module_scope:
+ entry.is_cglobal = True
+ else:
+ entry.is_local = True
+ entry.in_subscope = True
self.var_entries.append(entry)
self.entries[name] = entry
return entry
@@ -1916,7 +1916,7 @@ class StructOrUnionScope(Scope):
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0,
- allow_pyobject=False, allow_memoryview=False):
+ allow_pyobject=False, allow_memoryview=False):
# Add an entry for an attribute.
if not cname:
cname = name
@@ -1928,12 +1928,12 @@ class StructOrUnionScope(Scope):
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
- error(pos, "C struct/union member cannot be a Python object")
- elif type.is_memoryviewslice and not allow_memoryview:
- # Memory views wrap their buffer owner as a Python object.
- error(pos, "C struct/union member cannot be a memory view")
+ error(pos, "C struct/union member cannot be a Python object")
+ elif type.is_memoryviewslice and not allow_memoryview:
+ # Memory views wrap their buffer owner as a Python object.
+ error(pos, "C struct/union member cannot be a memory view")
if visibility != 'private':
- error(pos, "C struct/union member cannot be declared %s" % visibility)
+ error(pos, "C struct/union member cannot be declared %s" % visibility)
return entry
def declare_cfunction(self, name, type, pos,
@@ -2018,7 +2018,7 @@ class PyClassScope(ClassScope):
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
- orig_entry.already_declared_here()
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None:
@@ -2058,7 +2058,7 @@ class CClassScope(ClassScope):
# inherited_var_entries [Entry] Adapted var entries from base class
is_c_class_scope = 1
- is_closure_class_scope = False
+ is_closure_class_scope = False
has_pyobject_attrs = False
has_memoryview_attrs = False
@@ -2102,7 +2102,7 @@ class CClassScope(ClassScope):
for entry in self.var_entries:
if entry.type.is_pyobject:
- if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"):
+ if include_weakref or (self.is_closure_class_scope or entry.name != "__weakref__"):
if include_gc_simple or not entry.type.is_gc_simple:
py_attrs.append(entry)
elif entry.type == PyrexTypes.c_py_buffer_type:
@@ -2122,7 +2122,7 @@ class CClassScope(ClassScope):
error(pos,
"C attributes cannot be added in implementation part of"
" extension type defined in a pxd")
- if not self.is_closure_class_scope and get_special_method_signature(name):
+ if not self.is_closure_class_scope and get_special_method_signature(name):
error(pos,
"The name '%s' is reserved for a special method."
% name)
@@ -2140,7 +2140,7 @@ class CClassScope(ClassScope):
self.has_memoryview_attrs = True
elif type.is_cpp_class:
self.has_cpp_class_attrs = True
- elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'):
+ elif type.is_pyobject and (self.is_closure_class_scope or name != '__weakref__'):
self.has_pyobject_attrs = True
if (not type.is_builtin_type
or not type.scope or type.scope.needs_gc()):
@@ -2153,7 +2153,7 @@ class CClassScope(ClassScope):
# so do conversion ourself rather than rely on the CPython mechanism (through
# a property; made in AnalyseDeclarationsTransform).
entry.needs_property = True
- if not self.is_closure_class_scope and name == "__weakref__":
+ if not self.is_closure_class_scope and name == "__weakref__":
error(pos, "Special attribute __weakref__ cannot be exposed to Python")
if not (type.is_pyobject or type.can_coerce_to_pyobject(self)):
# we're not testing for coercion *from* Python here - that would fail later
@@ -2177,13 +2177,13 @@ class CClassScope(ClassScope):
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
- if name in richcmp_special_methods:
- if self.lookup_here('__richcmp__'):
- error(pos, "Cannot define both % and __richcmp__" % name)
- elif name == '__richcmp__':
- for n in richcmp_special_methods:
- if self.lookup_here(n):
- error(pos, "Cannot define both % and __richcmp__" % n)
+ if name in richcmp_special_methods:
+ if self.lookup_here('__richcmp__'):
+ error(pos, "Cannot define both % and __richcmp__" % name)
+ elif name == '__richcmp__':
+ for n in richcmp_special_methods:
+ if self.lookup_here(n):
+ error(pos, "Cannot define both % and __richcmp__" % n)
if name == "__new__":
error(pos, "__new__ method of extension type will change semantics "
"in a future version of Pyrex and Cython. Use __cinit__ instead.")
@@ -2203,7 +2203,7 @@ class CClassScope(ClassScope):
return entry
def lookup_here(self, name):
- if not self.is_closure_class_scope and name == "__new__":
+ if not self.is_closure_class_scope and name == "__new__":
name = EncodedString("__cinit__")
entry = ClassScope.lookup_here(self, name)
if entry and entry.is_builtin_cmethod:
@@ -2242,18 +2242,18 @@ class CClassScope(ClassScope):
if entry.is_final_cmethod and entry.is_inherited:
error(pos, "Overriding final methods is not allowed")
elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
- # Fix with_gil vs nogil.
- entry.type = entry.type.with_with_gil(type.with_gil)
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
- if (self.defined and not in_pxd
- and not type.same_c_signature_as_resolved_type(entry.type, as_cmethod = 1, as_pxd_definition = 1)):
- # TODO(robertwb): Make this an error.
- warning(pos,
- "Compatible but non-identical C method '%s' not redeclared "
+ if (self.defined and not in_pxd
+ and not type.same_c_signature_as_resolved_type(entry.type, as_cmethod = 1, as_pxd_definition = 1)):
+ # TODO(robertwb): Make this an error.
+ warning(pos,
+ "Compatible but non-identical C method '%s' not redeclared "
"in definition part of extension type '%s'. "
"This may cause incorrect vtables to be generated." % (
name, self.class_name), 2)
- warning(entry.pos, "Previous declaration is here", 2)
+ warning(entry.pos, "Previous declaration is here", 2)
entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
else:
error(pos, "Signature not compatible with previous declaration")
@@ -2262,7 +2262,7 @@ class CClassScope(ClassScope):
if self.defined:
error(pos,
"C method '%s' not previously declared in definition part of"
- " extension type '%s'" % (name, self.class_name))
+ " extension type '%s'" % (name, self.class_name))
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
if defining:
entry.func_cname = self.mangle(Naming.func_prefix, name)
@@ -2279,11 +2279,11 @@ class CClassScope(ClassScope):
return entry
- def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
+ def add_cfunction(self, name, type, pos, cname, visibility, modifiers, inherited=False):
# Add a cfunction entry without giving it a func_cname.
prev_entry = self.lookup_here(name)
entry = ClassScope.add_cfunction(self, name, type, pos, cname,
- visibility, modifiers, inherited=inherited)
+ visibility, modifiers, inherited=inherited)
entry.is_cmethod = 1
entry.prev_entry = prev_entry
return entry
@@ -2345,7 +2345,7 @@ class CClassScope(ClassScope):
cname = adapt(cname)
entry = self.add_cfunction(base_entry.name, base_entry.type,
base_entry.pos, cname,
- base_entry.visibility, base_entry.func_modifiers, inherited=True)
+ base_entry.visibility, base_entry.func_modifiers, inherited=True)
entry.is_inherited = 1
if base_entry.is_final_cmethod:
entry.is_final_cmethod = True
@@ -2380,18 +2380,18 @@ class CppClassScope(Scope):
def declare_var(self, name, type, pos,
cname = None, visibility = 'extern',
- api = 0, in_pxd = 0, is_cdef = 0, defining = 0):
+ api = 0, in_pxd = 0, is_cdef = 0, defining = 0):
# Add an entry for an attribute.
if not cname:
cname = name
entry = self.lookup_here(name)
if defining and entry is not None:
- if entry.type.same_as(type):
- # Fix with_gil vs nogil.
- entry.type = entry.type.with_with_gil(type.with_gil)
- elif type.is_cfunction and type.compatible_signature_with(entry.type):
- entry.type = type
- else:
+ if entry.type.same_as(type):
+ # Fix with_gil vs nogil.
+ entry.type = entry.type.with_with_gil(type.with_gil)
+ elif type.is_cfunction and type.compatible_signature_with(entry.type):
+ entry.type = type
+ else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.declare(name, cname, type, pos, visibility)
@@ -2406,31 +2406,31 @@ class CppClassScope(Scope):
def declare_cfunction(self, name, type, pos,
cname=None, visibility='extern', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
- class_name = self.name.split('::')[-1]
- if name in (class_name, '__init__') and cname is None:
- cname = "%s__init__%s" % (Naming.func_prefix, class_name)
+ class_name = self.name.split('::')[-1]
+ if name in (class_name, '__init__') and cname is None:
+ cname = "%s__init__%s" % (Naming.func_prefix, class_name)
name = '<init>'
- type.return_type = PyrexTypes.CVoidType()
- # This is called by the actual constructor, but need to support
- # arguments that cannot by called by value.
- type.original_args = type.args
- def maybe_ref(arg):
- if arg.type.is_cpp_class and not arg.type.is_reference:
- return PyrexTypes.CFuncTypeArg(
- arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
- else:
- return arg
- type.args = [maybe_ref(arg) for arg in type.args]
+ type.return_type = PyrexTypes.CVoidType()
+ # This is called by the actual constructor, but need to support
+ # arguments that cannot by called by value.
+ type.original_args = type.args
+ def maybe_ref(arg):
+ if arg.type.is_cpp_class and not arg.type.is_reference:
+ return PyrexTypes.CFuncTypeArg(
+ arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
+ else:
+ return arg
+ type.args = [maybe_ref(arg) for arg in type.args]
elif name == '__dealloc__' and cname is None:
- cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
+ cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
name = '<del>'
- type.return_type = PyrexTypes.CVoidType()
- if name in ('<init>', '<del>') and type.nogil:
- for base in self.type.base_classes:
- base_entry = base.scope.lookup(name)
- if base_entry and not base_entry.type.nogil:
- error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
- error(base_entry.pos, "Base constructor defined here.")
+ type.return_type = PyrexTypes.CVoidType()
+ if name in ('<init>', '<del>') and type.nogil:
+ for base in self.type.base_classes:
+ base_entry = base.scope.lookup(name)
+ if base_entry and not base_entry.type.nogil:
+ error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
+ error(base_entry.pos, "Base constructor defined here.")
prev_entry = self.lookup_here(name)
entry = self.declare_var(name, type, pos,
defining=defining,
@@ -2441,22 +2441,22 @@ class CppClassScope(Scope):
type.entry = entry
return entry
- def declare_inherited_cpp_attributes(self, base_class):
- base_scope = base_class.scope
- template_type = base_class
- while getattr(template_type, 'template_type', None):
- template_type = template_type.template_type
- if getattr(template_type, 'templates', None):
- base_templates = [T.name for T in template_type.templates]
- else:
- base_templates = ()
+ def declare_inherited_cpp_attributes(self, base_class):
+ base_scope = base_class.scope
+ template_type = base_class
+ while getattr(template_type, 'template_type', None):
+ template_type = template_type.template_type
+ if getattr(template_type, 'templates', None):
+ base_templates = [T.name for T in template_type.templates]
+ else:
+ base_templates = ()
# Declare entries for all the C++ attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
- #constructor/destructor is not inherited
- if base_entry.name in ("<init>", "<del>"):
+ #constructor/destructor is not inherited
+ if base_entry.name in ("<init>", "<del>"):
continue
#print base_entry.name, self.entries
if base_entry.name in self.entries:
@@ -2464,7 +2464,7 @@ class CppClassScope(Scope):
entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
- entry.is_inherited = 1
+ entry.is_inherited = 1
self.inherited_var_entries.append(entry)
for base_entry in base_scope.cfunc_entries:
entry = self.declare_cfunction(base_entry.name, base_entry.type,
@@ -2473,12 +2473,12 @@ class CppClassScope(Scope):
modifiers=base_entry.func_modifiers,
utility_code=base_entry.utility_code)
entry.is_inherited = 1
- for base_entry in base_scope.type_entries:
- if base_entry.name not in base_templates:
- entry = self.declare_type(base_entry.name, base_entry.type,
- base_entry.pos, base_entry.cname,
- base_entry.visibility)
- entry.is_inherited = 1
+ for base_entry in base_scope.type_entries:
+ if base_entry.name not in base_templates:
+ entry = self.declare_type(base_entry.name, base_entry.type,
+ base_entry.pos, base_entry.cname,
+ base_entry.visibility)
+ entry.is_inherited = 1
def specialize(self, values, type_entry):
scope = CppClassScope(self.name, self.outer_scope)
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py b/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py
index 3792f26e99..793672925d 100644
--- a/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestMemView.py
@@ -13,7 +13,7 @@ class TestMemviewParsing(CythonTest):
def not_parseable(self, expected_error, s):
e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
self.assertEqual(expected_error, e.message_only)
-
+
def test_default_1dim(self):
self.parse(u"cdef int[:] x")
self.parse(u"cdef short int[:] x")
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py
index 9ee8da5478..ef9cd62bb1 100644
--- a/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTreeFragment.py
@@ -45,7 +45,7 @@ class TestTreeFragments(CythonTest):
T = F.substitute({"v" : NameNode(pos=None, name="a")})
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
- self.assertEqual(v.pos, a.pos)
+ self.assertEqual(v.pos, a.pos)
def test_temps(self):
TemplateTransform.temp_name_counter = 0
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py
index bee53b3d2b..dd14846652 100644
--- a/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTreePath.py
@@ -20,75 +20,75 @@ class TestTreePath(TransformTest):
def test_node_path(self):
t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode")))
- self.assertEqual(2, len(find_all(t, "//NameNode")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
- self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode")))
+ self.assertEqual(2, len(find_all(t, "//NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
def test_node_path_star(self):
t = self._build_tree()
- self.assertEqual(10, len(find_all(t, "//*")))
- self.assertEqual(8, len(find_all(t, "//DefNode//*")))
- self.assertEqual(0, len(find_all(t, "//NameNode//*")))
+ self.assertEqual(10, len(find_all(t, "//*")))
+ self.assertEqual(8, len(find_all(t, "//DefNode//*")))
+ self.assertEqual(0, len(find_all(t, "//NameNode//*")))
def test_node_path_attribute(self):
t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
- self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
+ self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
+ self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
def test_node_path_attribute_dotted(self):
t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
- self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
+ self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
def test_node_path_child(self):
t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
def test_node_path_node_predicate(self):
t = self._build_tree()
- self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
- self.assertEqual(Nodes.ReturnStatNode,
- type(find_first(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//ReturnStatNode[./NameNode]")))
def test_node_path_node_predicate_step(self):
t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
- self.assertEqual(Nodes.ReturnStatNode,
- type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
def test_node_path_attribute_exists(self):
t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
- self.assertEqual(ExprNodes.NameNode,
- type(find_first(t, "//NameNode[@name]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
+ self.assertEqual(ExprNodes.NameNode,
+ type(find_first(t, "//NameNode[@name]")))
def test_node_path_attribute_exists_not(self):
t = self._build_tree()
- self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
- self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
def test_node_path_and(self):
t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
- self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
- self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
def test_node_path_attribute_string_predicate(self):
t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
+ self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
def test_node_path_recursive_predicate(self):
t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
if __name__ == '__main__':
unittest.main()
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py b/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py
index f2f6f3773b..11e07dd75c 100644
--- a/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestTypes.py
@@ -1,19 +1,19 @@
-from __future__ import absolute_import
-
-import unittest
-
-import Cython.Compiler.PyrexTypes as PT
-
-
-class TestMethodDispatcherTransform(unittest.TestCase):
-
- def test_widest_numeric_type(self):
- def assert_widest(type1, type2, widest):
- self.assertEqual(widest, PT.widest_numeric_type(type1, type2))
-
- assert_widest(PT.c_int_type, PT.c_long_type, PT.c_long_type)
- assert_widest(PT.c_double_type, PT.c_long_type, PT.c_double_type)
- assert_widest(PT.c_longdouble_type, PT.c_long_type, PT.c_longdouble_type)
-
- cenum = PT.CEnumType("E", "cenum", typedef_flag=False)
- assert_widest(PT.c_int_type, cenum, PT.c_int_type)
+from __future__ import absolute_import
+
+import unittest
+
+import Cython.Compiler.PyrexTypes as PT
+
+
+class TestMethodDispatcherTransform(unittest.TestCase):
+
+ def test_widest_numeric_type(self):
+ def assert_widest(type1, type2, widest):
+ self.assertEqual(widest, PT.widest_numeric_type(type1, type2))
+
+ assert_widest(PT.c_int_type, PT.c_long_type, PT.c_long_type)
+ assert_widest(PT.c_double_type, PT.c_long_type, PT.c_double_type)
+ assert_widest(PT.c_longdouble_type, PT.c_long_type, PT.c_longdouble_type)
+
+ cenum = PT.CEnumType("E", "cenum", typedef_flag=False)
+ assert_widest(PT.c_int_type, cenum, PT.c_int_type)
diff --git a/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py b/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py
index 3d1906ca0b..fe360ec53a 100644
--- a/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py
+++ b/contrib/tools/cython/Cython/Compiler/Tests/TestUtilityLoad.py
@@ -23,27 +23,27 @@ class TestUtilityLoader(unittest.TestCase):
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name))
- self.assertEqual(got, self.expected)
+ self.assertEqual(got, self.expected)
got = strip_2tup(self.cls.load_as_string(self.name, self.filename))
- self.assertEqual(got, self.expected)
+ self.assertEqual(got, self.expected)
def test_load(self):
utility = self.cls.load(self.name)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEqual(got, self.expected)
+ self.assertEqual(got, self.expected)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEqual(got, self.required)
+ self.assertEqual(got, self.required)
utility = self.cls.load(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEqual(got, self.expected)
+ self.assertEqual(got, self.expected)
utility = self.cls.load_cached(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEqual(got, self.expected)
+ self.assertEqual(got, self.expected)
class TestTempitaUtilityLoader(TestUtilityLoader):
@@ -60,20 +60,20 @@ class TestTempitaUtilityLoader(TestUtilityLoader):
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
- self.assertEqual(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
def test_load(self):
utility = self.cls.load(self.name, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEqual(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEqual(got, self.required_tempita)
+ self.assertEqual(got, self.required_tempita)
utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEqual(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
class TestCythonUtilityLoader(TestTempitaUtilityLoader):
diff --git a/contrib/tools/cython/Cython/Compiler/TreeFragment.py b/contrib/tools/cython/Cython/Compiler/TreeFragment.py
index b85da8191a..ca4c636d99 100644
--- a/contrib/tools/cython/Cython/Compiler/TreeFragment.py
+++ b/contrib/tools/cython/Cython/Compiler/TreeFragment.py
@@ -24,7 +24,7 @@ from . import UtilNodes
class StringParseContext(Main.Context):
- def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
+ def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
if include_directories is None:
include_directories = []
if compiler_directives is None:
@@ -209,9 +209,9 @@ def strip_common_indent(lines):
"""Strips empty lines and common indentation from the list of strings given in lines"""
# TODO: Facilitate textwrap.indent instead
lines = [x for x in lines if x.strip() != u""]
- if lines:
- minindent = min([len(_match_indent(x).group(0)) for x in lines])
- lines = [x[minindent:] for x in lines]
+ if lines:
+ minindent = min([len(_match_indent(x).group(0)) for x in lines])
+ lines = [x[minindent:] for x in lines]
return lines
diff --git a/contrib/tools/cython/Cython/Compiler/TreePath.py b/contrib/tools/cython/Cython/Compiler/TreePath.py
index 8585905557..da3877dbe8 100644
--- a/contrib/tools/cython/Cython/Compiler/TreePath.py
+++ b/contrib/tools/cython/Cython/Compiler/TreePath.py
@@ -191,8 +191,8 @@ def parse_path_value(next):
return int(value)
except ValueError:
pass
- elif token[1].isdigit():
- return int(token[1])
+ elif token[1].isdigit():
+ return int(token[1])
else:
name = token[1].lower()
if name == 'true':
diff --git a/contrib/tools/cython/Cython/Compiler/TypeInference.py b/contrib/tools/cython/Cython/Compiler/TypeInference.py
index c7ffee7d24..939db6cc94 100644
--- a/contrib/tools/cython/Cython/Compiler/TypeInference.py
+++ b/contrib/tools/cython/Cython/Compiler/TypeInference.py
@@ -250,7 +250,7 @@ class MarkParallelAssignments(EnvTransform):
def visit_YieldExprNode(self, node):
if self.parallel_block_stack:
- error(node.pos, "'%s' not allowed in parallel sections" % node.expr_keyword)
+ error(node.pos, "'%s' not allowed in parallel sections" % node.expr_keyword)
return node
def visit_ReturnStatNode(self, node):
@@ -306,13 +306,13 @@ class MarkOverflowingArithmetic(CythonTransform):
else:
return self.visit_dangerous_node(node)
- def visit_SimpleCallNode(self, node):
- if node.function.is_name and node.function.name == 'abs':
- # Overflows for minimum value of fixed size ints.
- return self.visit_dangerous_node(node)
- else:
- return self.visit_neutral_node(node)
-
+ def visit_SimpleCallNode(self, node):
+ if node.function.is_name and node.function.name == 'abs':
+ # Overflows for minimum value of fixed size ints.
+ return self.visit_dangerous_node(node)
+ else:
+ return self.visit_neutral_node(node)
+
visit_UnopNode = visit_neutral_node
visit_UnaryMinusNode = visit_dangerous_node
@@ -378,7 +378,7 @@ class SimpleAssignmentTypeInferer(object):
self.set_entry_type(entry, py_object_type)
return
- # Set of assignments
+ # Set of assignments
assignments = set()
assmts_resolved = set()
dependencies = {}
@@ -415,24 +415,24 @@ class SimpleAssignmentTypeInferer(object):
entry = node.entry
return spanning_type(types, entry.might_overflow, entry.pos, scope)
- def inferred_types(entry):
- has_none = False
- has_pyobjects = False
- types = []
- for assmt in entry.cf_assignments:
- if assmt.rhs.is_none:
- has_none = True
- else:
- rhs_type = assmt.inferred_type
- if rhs_type and rhs_type.is_pyobject:
- has_pyobjects = True
- types.append(rhs_type)
- # Ignore None assignments as long as there are concrete Python type assignments.
- # but include them if None is the only assigned Python object.
- if has_none and not has_pyobjects:
- types.append(py_object_type)
- return types
-
+ def inferred_types(entry):
+ has_none = False
+ has_pyobjects = False
+ types = []
+ for assmt in entry.cf_assignments:
+ if assmt.rhs.is_none:
+ has_none = True
+ else:
+ rhs_type = assmt.inferred_type
+ if rhs_type and rhs_type.is_pyobject:
+ has_pyobjects = True
+ types.append(rhs_type)
+ # Ignore None assignments as long as there are concrete Python type assignments.
+ # but include them if None is the only assigned Python object.
+ if has_none and not has_pyobjects:
+ types.append(py_object_type)
+ return types
+
def resolve_assignments(assignments):
resolved = set()
for assmt in assignments:
@@ -485,7 +485,7 @@ class SimpleAssignmentTypeInferer(object):
continue
entry_type = py_object_type
if assmts_resolved.issuperset(entry.cf_assignments):
- types = inferred_types(entry)
+ types = inferred_types(entry)
if types and all(types):
entry_type = spanning_type(
types, entry.might_overflow, entry.pos, scope)
@@ -495,9 +495,9 @@ class SimpleAssignmentTypeInferer(object):
def reinfer():
dirty = False
for entry in inferred:
- for assmt in entry.cf_assignments:
- assmt.infer_type()
- types = inferred_types(entry)
+ for assmt in entry.cf_assignments:
+ assmt.infer_type()
+ types = inferred_types(entry)
new_type = spanning_type(types, entry.might_overflow, entry.pos, scope)
if new_type != entry.type:
self.set_entry_type(entry, new_type)
@@ -563,8 +563,8 @@ def safe_spanning_type(types, might_overflow, pos, scope):
# find_spanning_type() only returns 'bint' for clean boolean
# operations without other int types, so this is safe, too
return result_type
- elif result_type.is_pythran_expr:
- return result_type
+ elif result_type.is_pythran_expr:
+ return result_type
elif result_type.is_ptr:
# Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject, and inferring char* is now accepted, too.
diff --git a/contrib/tools/cython/Cython/Compiler/TypeSlots.py b/contrib/tools/cython/Cython/Compiler/TypeSlots.py
index 0b4ff67042..3337cee960 100644
--- a/contrib/tools/cython/Cython/Compiler/TypeSlots.py
+++ b/contrib/tools/cython/Cython/Compiler/TypeSlots.py
@@ -12,9 +12,9 @@ from .Errors import error
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
-richcmp_special_methods = ['__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__']
-
+richcmp_special_methods = ['__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__']
+
class Signature(object):
# Method slot signature descriptor.
#
@@ -305,11 +305,11 @@ class MethodSlot(SlotDescriptor):
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
- if entry and entry.is_special and entry.func_cname:
+ if entry and entry.is_special and entry.func_cname:
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
- if entry and entry.is_special and entry.func_cname:
+ if entry and entry.is_special and entry.func_cname:
return entry.func_cname
return "0"
@@ -365,13 +365,13 @@ class ConstructorSlot(InternalMethodSlot):
self.method = method
def slot_code(self, scope):
- entry = scope.lookup_here(self.method)
+ entry = scope.lookup_here(self.method)
if (self.slot_name != 'tp_new'
and scope.parent_type.base_type
and not scope.has_pyobject_attrs
and not scope.has_memoryview_attrs
and not scope.has_cpp_class_attrs
- and not (entry and entry.is_special)):
+ and not (entry and entry.is_special)):
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
@@ -380,8 +380,8 @@ class ConstructorSlot(InternalMethodSlot):
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
- if entry and not entry.is_special:
- return "0"
+ if entry and not entry.is_special:
+ return "0"
return InternalMethodSlot.slot_code(self, scope)
@@ -399,23 +399,23 @@ class SyntheticSlot(InternalMethodSlot):
self.default_value = default_value
def slot_code(self, scope):
- if scope.defines_any_special(self.user_methods):
+ if scope.defines_any_special(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
else:
return self.default_value
-class RichcmpSlot(MethodSlot):
- def slot_code(self, scope):
- entry = scope.lookup_here(self.method_name)
- if entry and entry.is_special and entry.func_cname:
- return entry.func_cname
- elif scope.defines_any_special(richcmp_special_methods):
- return scope.mangle_internal(self.slot_name)
- else:
- return "0"
-
-
+class RichcmpSlot(MethodSlot):
+ def slot_code(self, scope):
+ entry = scope.lookup_here(self.method_name)
+ if entry and entry.is_special and entry.func_cname:
+ return entry.func_cname
+ elif scope.defines_any_special(richcmp_special_methods):
+ return scope.mangle_internal(self.slot_name)
+ else:
+ return "0"
+
+
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
@@ -535,7 +535,7 @@ class DictOffsetSlot(SlotDescriptor):
# Slot descriptor for a class' dict offset, for dynamic attributes.
def slot_code(self, scope):
- dict_entry = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
+ dict_entry = scope.lookup_here("__dict__") if not scope.is_closure_class_scope else None
if dict_entry and dict_entry.is_variable:
if getattr(dict_entry.type, 'cname', None) != 'PyDict_Type':
error(dict_entry.pos, "__dict__ slot must be of type 'dict'")
@@ -575,8 +575,8 @@ def get_special_method_signature(name):
slot = method_name_to_slot.get(name)
if slot:
return slot.signature
- elif name in richcmp_special_methods:
- return ibinaryfunc
+ elif name in richcmp_special_methods:
+ return ibinaryfunc
else:
return None
@@ -612,20 +612,20 @@ def get_slot_function(scope, slot):
return slot_code
return None
-
-def get_slot_by_name(slot_name):
- # For now, only search the type struct, no referenced sub-structs.
- for slot in slot_table:
- if slot.slot_name == slot_name:
- return slot
- assert False, "Slot not found: %s" % slot_name
-
-
-def get_slot_code_by_name(scope, slot_name):
- slot = get_slot_by_name(slot_name)
- return slot.slot_code(scope)
-
-
+
+def get_slot_by_name(slot_name):
+ # For now, only search the type struct, no referenced sub-structs.
+ for slot in slot_table:
+ if slot.slot_name == slot_name:
+ return slot
+ assert False, "Slot not found: %s" % slot_name
+
+
+def get_slot_code_by_name(scope, slot_name):
+ slot = get_slot_by_name(slot_name)
+ return slot.slot_code(scope)
+
+
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
@@ -692,7 +692,7 @@ delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
-richcmpfunc = Signature("TOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
+richcmpfunc = Signature("TOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
@@ -725,7 +725,7 @@ property_accessor_signatures = {
#
#------------------------------------------------------------------------------------------
-PyNumberMethods_Py3_GUARD = "PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)"
+PyNumberMethods_Py3_GUARD = "PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)"
PyNumberMethods = (
MethodSlot(binaryfunc, "nb_add", "__add__"),
@@ -856,7 +856,7 @@ slot_table = (
GCDependentSlot("tp_traverse"),
GCClearReferencesSlot("tp_clear"),
- RichcmpSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False), # Py3 checks for __hash__
+ RichcmpSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False), # Py3 checks for __hash__
EmptySlot("tp_weaklistoffset"),
@@ -910,7 +910,7 @@ MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
-MethodSlot(getattrofunc, "", "__getattribute__")
+MethodSlot(getattrofunc, "", "__getattribute__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
diff --git a/contrib/tools/cython/Cython/Compiler/UtilNodes.py b/contrib/tools/cython/Cython/Compiler/UtilNodes.py
index c41748ace0..bafbbb37c2 100644
--- a/contrib/tools/cython/Cython/Compiler/UtilNodes.py
+++ b/contrib/tools/cython/Cython/Compiler/UtilNodes.py
@@ -1,7 +1,7 @@
#
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
-# so it is convenient to have them in a separate module.
+# so it is convenient to have them in a separate module.
#
from __future__ import absolute_import
@@ -267,9 +267,9 @@ class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
def infer_type(self, env):
return self.subexpression.infer_type(env)
- def may_be_none(self):
- return self.subexpression.may_be_none()
-
+ def may_be_none(self):
+ return self.subexpression.may_be_none()
+
def result(self):
return self.subexpression.result()
diff --git a/contrib/tools/cython/Cython/Compiler/UtilityCode.py b/contrib/tools/cython/Cython/Compiler/UtilityCode.py
index 98e9ab5bfb..4a90470fd1 100644
--- a/contrib/tools/cython/Cython/Compiler/UtilityCode.py
+++ b/contrib/tools/cython/Cython/Compiler/UtilityCode.py
@@ -11,7 +11,7 @@ class NonManglingModuleScope(Symtab.ModuleScope):
def __init__(self, prefix, *args, **kw):
self.prefix = prefix
self.cython_scope = None
- self.cpp = kw.pop('cpp', False)
+ self.cpp = kw.pop('cpp', False)
Symtab.ModuleScope.__init__(self, *args, **kw)
def add_imported_entry(self, name, entry, pos):
@@ -43,7 +43,7 @@ class CythonUtilityCodeContext(StringParseContext):
if self.scope is None:
self.scope = NonManglingModuleScope(
- self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp)
+ self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp)
return self.scope
@@ -76,13 +76,13 @@ class CythonUtilityCode(Code.UtilityCodeBase):
# while the generated node trees can be altered in the compilation of a
# single file.
# Hence, delay any processing until later.
- context_types = {}
+ context_types = {}
if context is not None:
- from .PyrexTypes import BaseType
- for key, value in context.items():
- if isinstance(value, BaseType):
- context[key] = key
- context_types[key] = value
+ from .PyrexTypes import BaseType
+ for key, value in context.items():
+ if isinstance(value, BaseType):
+ context[key] = key
+ context_types[key] = value
impl = Code.sub_tempita(impl, context, file, name)
self.impl = impl
self.name = name
@@ -92,7 +92,7 @@ class CythonUtilityCode(Code.UtilityCodeBase):
self.from_scope = from_scope
self.outer_module_scope = outer_module_scope
self.compiler_directives = compiler_directives
- self.context_types = context_types
+ self.context_types = context_types
def __eq__(self, other):
if isinstance(other, CythonUtilityCode):
@@ -118,8 +118,8 @@ class CythonUtilityCode(Code.UtilityCodeBase):
from . import Pipeline, ParseTreeTransforms
context = CythonUtilityCodeContext(
- self.name, compiler_directives=self.compiler_directives,
- cpp=cython_scope.is_cpp() if cython_scope else False)
+ self.name, compiler_directives=self.compiler_directives,
+ cpp=cython_scope.is_cpp() if cython_scope else False)
context.prefix = self.prefix
context.cython_scope = cython_scope
#context = StringParseContext(self.name)
@@ -170,18 +170,18 @@ class CythonUtilityCode(Code.UtilityCodeBase):
pipeline, scope_transform,
before=ParseTreeTransforms.AnalyseDeclarationsTransform)
- if self.context_types:
- # inject types into module scope
- def scope_transform(module_node):
- for name, type in self.context_types.items():
- entry = module_node.scope.declare_type(name, type, None, visibility='extern')
- entry.in_cinclude = True
- return module_node
-
- pipeline = Pipeline.insert_into_pipeline(
- pipeline, scope_transform,
- before=ParseTreeTransforms.AnalyseDeclarationsTransform)
-
+ if self.context_types:
+ # inject types into module scope
+ def scope_transform(module_node):
+ for name, type in self.context_types.items():
+ entry = module_node.scope.declare_type(name, type, None, visibility='extern')
+ entry.in_cinclude = True
+ return module_node
+
+ pipeline = Pipeline.insert_into_pipeline(
+ pipeline, scope_transform,
+ before=ParseTreeTransforms.AnalyseDeclarationsTransform)
+
(err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
assert not err, err
self.tree = tree
@@ -223,7 +223,7 @@ class CythonUtilityCode(Code.UtilityCodeBase):
for dep in self.requires:
if dep.is_cython_utility:
- dep.declare_in_scope(dest_scope, cython_scope=cython_scope)
+ dep.declare_in_scope(dest_scope, cython_scope=cython_scope)
return original_scope
diff --git a/contrib/tools/cython/Cython/Compiler/Visitor.py b/contrib/tools/cython/Cython/Compiler/Visitor.py
index a35d13e1d0..c06b764f14 100644
--- a/contrib/tools/cython/Cython/Compiler/Visitor.py
+++ b/contrib/tools/cython/Cython/Compiler/Visitor.py
@@ -598,23 +598,23 @@ class MethodDispatcherTransform(EnvTransform):
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
- entry = function.entry
+ entry = function.entry
is_builtin = (
- entry.is_builtin or
- entry is self.current_env().builtin_scope().lookup_here(function.name))
+ entry.is_builtin or
+ entry is self.current_env().builtin_scope().lookup_here(function.name))
if not is_builtin:
if function.cf_state and function.cf_state.is_single:
# we know the value of the variable
# => see if it's usable instead
return self._delegate_to_assigned_value(
node, function, arg_list, kwargs)
- if arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type:
- if entry.scope.parent_type is arg_list[0].type:
- # Optimised (unbound) method of a builtin type => try to "de-optimise".
- return self._dispatch_to_method_handler(
- entry.name, self_arg=None, is_unbound_method=True,
- type_name=entry.scope.parent_type.name,
- node=node, function=function, arg_list=arg_list, kwargs=kwargs)
+ if arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type:
+ if entry.scope.parent_type is arg_list[0].type:
+ # Optimised (unbound) method of a builtin type => try to "de-optimise".
+ return self._dispatch_to_method_handler(
+ entry.name, self_arg=None, is_unbound_method=True,
+ type_name=entry.scope.parent_type.name,
+ node=node, function=function, arg_list=arg_list, kwargs=kwargs)
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
@@ -640,7 +640,7 @@ class MethodDispatcherTransform(EnvTransform):
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
- if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject:
+ if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject:
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = self_arg.name