author     Aleksandr <[email protected]>    2022-02-10 16:47:52 +0300
committer  Daniil Cherednik <[email protected]>    2022-02-10 16:47:52 +0300
commit     b05913d1c3c02a773578bceb7285084d2933ae86 (patch)
tree       c0748b5dcbade83af788c0abfa89c0383d6b779c  /contrib/tools/cython/Cython/Compiler/Nodes.py
parent     ea6c5b7f172becca389cacaff7d5f45f6adccbe6 (diff)
Restoring authorship annotation for Aleksandr <[email protected]>. Commit 2 of 2.
Diffstat (limited to 'contrib/tools/cython/Cython/Compiler/Nodes.py')
-rw-r--r--  contrib/tools/cython/Cython/Compiler/Nodes.py | 1554
1 file changed, 777 insertions(+), 777 deletions(-)
diff --git a/contrib/tools/cython/Cython/Compiler/Nodes.py b/contrib/tools/cython/Cython/Compiler/Nodes.py
index 0796f40c0f0..6436c5002d6 100644
--- a/contrib/tools/cython/Cython/Compiler/Nodes.py
+++ b/contrib/tools/cython/Cython/Compiler/Nodes.py
@@ -10,7 +10,7 @@ cython.declare(sys=object, os=object, copy=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
- error_type=object, _py_int_types=object)
+ error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
@@ -28,7 +28,7 @@ from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
-from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
+from .Pythran import has_np_pythran, pythran_type, is_pythran_buffer
from ..Utils import add_metaclass
@@ -39,7 +39,7 @@ else:
def relative_position(pos):
- return (pos[0].get_filenametable_entry(), pos[1])
+ return (pos[0].get_filenametable_entry(), pos[1])
def embed_position(pos, docstring):
@@ -68,13 +68,13 @@ def embed_position(pos, docstring):
return doc
-def analyse_type_annotation(annotation, env, assigned_value=None):
+def analyse_type_annotation(annotation, env, assigned_value=None):
base_type = None
- is_ambiguous = False
+ is_ambiguous = False
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
- warning(annotation.pos,
- "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
+ warning(annotation.pos,
+ "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
@@ -88,30 +88,30 @@ def analyse_type_annotation(annotation, env, assigned_value=None):
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
- if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
- # Map builtin numeric Python types to C types in safe cases.
- if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
- assigned_type = assigned_value.infer_type(env)
- if assigned_type and assigned_type.is_pyobject:
- # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
- is_ambiguous = True
- arg_type = None
- # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
- if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
- arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
- elif arg_type is not None and annotation.is_string_literal:
- warning(annotation.pos,
- "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
+ if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
+ # Map builtin numeric Python types to C types in safe cases.
+ if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
+ assigned_type = assigned_value.infer_type(env)
+ if assigned_type and assigned_type.is_pyobject:
+ # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
+ is_ambiguous = True
+ arg_type = None
+ # ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
+ if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
+ arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
+ elif arg_type is not None and annotation.is_string_literal:
+ warning(annotation.pos,
+ "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
- elif is_ambiguous:
- warning(annotation.pos, "Ambiguous types in annotation, ignoring")
+ elif is_ambiguous:
+ warning(annotation.pos, "Ambiguous types in annotation, ignoring")
else:
- warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
+ warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
return base_type, arg_type
@@ -474,9 +474,9 @@ class StatNode(Node):
class CDefExternNode(StatNode):
- # include_file string or None
- # verbatim_include string or None
- # body StatListNode
+ # include_file string or None
+ # verbatim_include string or None
+ # body StatListNode
child_attrs = ["body"]
@@ -486,18 +486,18 @@ class CDefExternNode(StatNode):
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
- if self.include_file or self.verbatim_include:
- # Determine whether include should be late
- stats = self.body.stats
- if not env.directives['preliminary_late_includes_cy28']:
- late = False
- elif not stats:
- # Special case: empty 'cdef extern' blocks are early
- late = False
- else:
- late = all(isinstance(node, CVarDefNode) for node in stats)
- env.add_include_file(self.include_file, self.verbatim_include, late)
-
+ if self.include_file or self.verbatim_include:
+ # Determine whether include should be late
+ stats = self.body.stats
+ if not env.directives['preliminary_late_includes_cy28']:
+ late = False
+ elif not stats:
+ # Special case: empty 'cdef extern' blocks are early
+ late = False
+ else:
+ late = all(isinstance(node, CVarDefNode) for node in stats)
+ env.add_include_file(self.include_file, self.verbatim_include, late)
+
def analyse_expressions(self, env):
return self
@@ -539,7 +539,7 @@ class CNameDeclaratorNode(CDeclaratorNode):
default = None
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
@@ -565,11 +565,11 @@ class CPtrDeclaratorNode(CDeclaratorNode):
def analyse_templates(self):
return self.base.analyse_templates()
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
- return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(ptr_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CReferenceDeclaratorNode(CDeclaratorNode):
@@ -580,11 +580,11 @@ class CReferenceDeclaratorNode(CDeclaratorNode):
def analyse_templates(self):
return self.base.analyse_templates()
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos, "Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
- return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(ref_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CArrayDeclaratorNode(CDeclaratorNode):
@@ -593,7 +593,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
child_attrs = ["base", "dimension"]
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if (base_type.is_cpp_class and base_type.is_template_type()) or base_type.is_cfunction:
from .ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
@@ -607,7 +607,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
base_type = error_type
else:
base_type = base_type.specialize_here(self.pos, values)
- return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(base_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
if self.dimension:
self.dimension = self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
@@ -628,7 +628,7 @@ class CArrayDeclaratorNode(CDeclaratorNode):
if base_type.is_cfunction:
error(self.pos, "Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
- return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(array_type, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CFuncDeclaratorNode(CDeclaratorNode):
@@ -671,7 +671,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
else:
return None
- def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
+ def analyse(self, return_type, env, nonempty=0, directive_locals=None, visibility=None, in_pxd=False):
if directive_locals is None:
directive_locals = {}
if nonempty:
@@ -723,16 +723,16 @@ class CFuncDeclaratorNode(CDeclaratorNode):
and self.exception_check != '+'):
error(self.pos, "Exception clause not allowed for function returning Python object")
else:
- if self.exception_value is None and self.exception_check and self.exception_check != '+':
- # Use an explicit exception return value to speed up exception checks.
- # Even if it is not declared, we can use the default exception value of the return type,
- # unless the function is some kind of external function that we do not control.
- if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
- # Extension types are more difficult because the signature must match the base type signature.
- if not env.is_c_class_scope:
- from .ExprNodes import ConstNode
- self.exception_value = ConstNode(
- self.pos, value=return_type.exception_value, type=return_type)
+ if self.exception_value is None and self.exception_check and self.exception_check != '+':
+ # Use an explicit exception return value to speed up exception checks.
+ # Even if it is not declared, we can use the default exception value of the return type,
+ # unless the function is some kind of external function that we do not control.
+ if return_type.exception_value is not None and (visibility != 'extern' and not in_pxd):
+ # Extension types are more difficult because the signature must match the base type signature.
+ if not env.is_c_class_scope:
+ from .ExprNodes import ConstNode
+ self.exception_value = ConstNode(
+ self.pos, value=return_type.exception_value, type=return_type)
if self.exception_value:
self.exception_value = self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
@@ -789,7 +789,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
- return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(func_type, env, visibility=visibility, in_pxd=in_pxd)
def declare_optional_arg_struct(self, func_type, env, fused_cname=None):
"""
@@ -803,7 +803,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
- scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
+ scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
@@ -829,12 +829,12 @@ class CConstDeclaratorNode(CDeclaratorNode):
child_attrs = ["base"]
- def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
+ def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if base_type.is_pyobject:
error(self.pos,
"Const base type cannot be a Python object")
const = PyrexTypes.c_const_type(base_type)
- return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
+ return self.base.analyse(const, env, nonempty=nonempty, visibility=visibility, in_pxd=in_pxd)
class CArgDeclNode(Node):
@@ -905,8 +905,8 @@ class CArgDeclNode(Node):
base_type = base_type.base_type
# inject type declaration from annotations
- # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
- if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
+ # this is called without 'env' by AdjustDefByDirectives transform before declaration analysis
+ if self.annotation and env and env.directives['annotation_typing'] and self.base_type.name is None:
arg_type = self.inject_type_from_annotations(env)
if arg_type is not None:
base_type = arg_type
@@ -918,7 +918,7 @@ class CArgDeclNode(Node):
annotation = self.annotation
if not annotation:
return None
- base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
+ base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
if base_type is not None:
self.base_type = base_type
return arg_type
@@ -1155,7 +1155,7 @@ class TemplatedTypeNode(CBaseTypeNode):
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
- type = error_type
+ type = error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
@@ -1176,8 +1176,8 @@ class TemplatedTypeNode(CBaseTypeNode):
for name, value in options.items()])
self.type = PyrexTypes.BufferType(base_type, **options)
- if has_np_pythran(env) and is_pythran_buffer(self.type):
- self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
+ if has_np_pythran(env) and is_pythran_buffer(self.type):
+ self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
else:
# Array
@@ -1352,11 +1352,11 @@ class CVarDefNode(StatNode):
if create_extern_wrapper:
declarator.overridable = False
if isinstance(declarator, CFuncDeclaratorNode):
- name_declarator, type = declarator.analyse(
- base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
+ name_declarator, type = declarator.analyse(
+ base_type, env, directive_locals=self.directive_locals, visibility=visibility, in_pxd=self.in_pxd)
else:
- name_declarator, type = declarator.analyse(
- base_type, env, visibility=visibility, in_pxd=self.in_pxd)
+ name_declarator, type = declarator.analyse(
+ base_type, env, visibility=visibility, in_pxd=self.in_pxd)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos, "Variable type '%s' is incomplete" % type)
@@ -1367,8 +1367,8 @@ class CVarDefNode(StatNode):
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
- if type.is_reference and self.visibility != 'extern':
- error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
+ if type.is_reference and self.visibility != 'extern':
+ error(declarator.pos, "C++ references cannot be declared; use a pointer instead")
if type.is_cfunction:
if 'staticmethod' in env.directives:
type.is_static_method = True
@@ -1611,8 +1611,8 @@ class CTypeDefNode(StatNode):
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
- name_declarator, type = self.declarator.analyse(
- base, env, visibility=self.visibility, in_pxd=self.in_pxd)
+ name_declarator, type = self.declarator.analyse(
+ base, env, visibility=self.visibility, in_pxd=self.in_pxd)
name = name_declarator.name
cname = name_declarator.cname
@@ -1684,18 +1684,18 @@ class FuncDefNode(StatNode, BlockNode):
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
- def analyse_annotation(self, env, annotation):
- # Annotations can not only contain valid Python expressions but arbitrary type references.
- if annotation is None:
- return None
- if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
- annotation = annotation.analyse_types(env)
- return annotation
-
+ def analyse_annotation(self, env, annotation):
+ # Annotations can not only contain valid Python expressions but arbitrary type references.
+ if annotation is None:
+ return None
+ if not env.directives['annotation_typing'] or annotation.analyse_as_type(env) is None:
+ annotation = annotation.analyse_types(env)
+ return annotation
+
def analyse_annotations(self, env):
for arg in self.args:
if arg.annotation:
- arg.annotation = self.analyse_annotation(env, arg.annotation)
+ arg.annotation = self.analyse_annotation(env, arg.annotation)
def align_argument_type(self, env, arg):
# @cython.locals()
@@ -1869,16 +1869,16 @@ class FuncDefNode(StatNode, BlockNode):
code.declare_gilstate()
if profile or linetrace:
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- tempvardecl_code.put_trace_declarations()
- code_object = self.code_object.calculate_result_code(code) if self.code_object else None
- code.put_trace_frame_init(code_object)
-
- # ----- Special check for getbuffer
- if is_getbuffer_slot:
- self.getbuffer_check(code)
-
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ tempvardecl_code.put_trace_declarations()
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
+
+ # ----- Special check for getbuffer
+ if is_getbuffer_slot:
+ self.getbuffer_check(code)
+
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
@@ -1904,8 +1904,8 @@ class FuncDefNode(StatNode, BlockNode):
# Scope unconditionally DECREFed on return.
code.putln("%s = %s;" % (
Naming.cur_scope_cname,
- lenv.scope_class.type.cast_code("Py_None")))
- code.put_incref("Py_None", py_object_type)
+ lenv.scope_class.type.cast_code("Py_None")))
+ code.put_incref("Py_None", py_object_type)
code.putln(code.error_goto(self.pos))
code.putln("} else {")
code.put_gotref(Naming.cur_scope_cname)
@@ -1932,14 +1932,14 @@ class FuncDefNode(StatNode, BlockNode):
if profile or linetrace:
# this looks a bit late, but if we don't get here due to a
# fatal error before hand, it's not really worth tracing
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- if self.is_wrapper:
- trace_name = self.entry.name + " (wrapper)"
- else:
- trace_name = self.entry.name
- code.put_trace_call(
- trace_name, self.pos, nogil=not code.funcstate.gil_owned)
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.is_wrapper:
+ trace_name = self.entry.name + " (wrapper)"
+ else:
+ trace_name = self.entry.name
+ code.put_trace_call(
+ trace_name, self.pos, nogil=not code.funcstate.gil_owned)
code.funcstate.can_trace = True
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
@@ -1952,7 +1952,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_var_incref(entry)
# Note: defaults are always incref-ed. For def functions, we
- # we acquire arguments from object conversion, so we have
+ # we acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
@@ -2001,8 +2001,8 @@ class FuncDefNode(StatNode, BlockNode):
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
- elif not self.return_type.is_void:
- code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
# ----- Error cleanup
if code.error_label in code.labels_used:
if not self.body.is_terminator:
@@ -2058,8 +2058,8 @@ class FuncDefNode(StatNode, BlockNode):
if err_val is not None:
if err_val != Naming.retval_cname:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
- elif not self.return_type.is_void:
- code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
+ elif not self.return_type.is_void:
+ code.putln("__Pyx_pretend_to_initialize(&%s);" % Naming.retval_cname)
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
@@ -2141,14 +2141,14 @@ class FuncDefNode(StatNode, BlockNode):
if profile or linetrace:
code.funcstate.can_trace = False
- if not self.is_generator:
- # generators are traced when iterated, not at creation
- if self.return_type.is_pyobject:
- code.put_trace_return(
- Naming.retval_cname, nogil=not code.funcstate.gil_owned)
- else:
- code.put_trace_return(
- "Py_None", nogil=not code.funcstate.gil_owned)
+ if not self.is_generator:
+ # generators are traced when iterated, not at creation
+ if self.return_type.is_pyobject:
+ code.put_trace_return(
+ Naming.retval_cname, nogil=not code.funcstate.gil_owned)
+ else:
+ code.put_trace_return(
+ "Py_None", nogil=not code.funcstate.gil_owned)
if not lenv.nogil:
# GIL holding function
@@ -2181,10 +2181,10 @@ class FuncDefNode(StatNode, BlockNode):
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
- entry = env.declare_arg(arg.name, arg.type, arg.pos)
- if arg.annotation:
- entry.annotation = arg.annotation
- return entry
+ entry = env.declare_arg(arg.name, arg.type, arg.pos)
+ if arg.annotation:
+ entry.annotation = arg.annotation
+ return entry
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
@@ -2230,59 +2230,59 @@ class FuncDefNode(StatNode, BlockNode):
#
# Special code for the __getbuffer__ function
#
- def _get_py_buffer_info(self):
- py_buffer = self.local_scope.arg_entries[1]
- try:
- # Check builtin definition of struct Py_buffer
- obj_type = py_buffer.type.base_type.scope.entries['obj'].type
- except (AttributeError, KeyError):
- # User code redeclared struct Py_buffer
- obj_type = None
- return py_buffer, obj_type
-
- # Old Python 3 used to support write-locks on buffer-like objects by
- # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
- # feature is obsolete, it was almost never used (only one instance in
- # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
- # (see bpo-14203). We add an extra check here to prevent legacy code from
- # from trying to use the feature and prevent segmentation faults.
- def getbuffer_check(self, code):
- py_buffer, _ = self._get_py_buffer_info()
- view = py_buffer.cname
- code.putln("if (%s == NULL) {" % view)
- code.putln("PyErr_SetString(PyExc_BufferError, "
- "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
- code.putln("return -1;")
+ def _get_py_buffer_info(self):
+ py_buffer = self.local_scope.arg_entries[1]
+ try:
+ # Check builtin definition of struct Py_buffer
+ obj_type = py_buffer.type.base_type.scope.entries['obj'].type
+ except (AttributeError, KeyError):
+ # User code redeclared struct Py_buffer
+ obj_type = None
+ return py_buffer, obj_type
+
+ # Old Python 3 used to support write-locks on buffer-like objects by
+ # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
+ # feature is obsolete, it was almost never used (only one instance in
+ # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
+ # (see bpo-14203). We add an extra check here to prevent legacy code from
+ # from trying to use the feature and prevent segmentation faults.
+ def getbuffer_check(self, code):
+ py_buffer, _ = self._get_py_buffer_info()
+ view = py_buffer.cname
+ code.putln("if (%s == NULL) {" % view)
+ code.putln("PyErr_SetString(PyExc_BufferError, "
+ "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
+ code.putln("return -1;")
code.putln("}")
- def getbuffer_init(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.put_init_to_py_none("%s->obj" % view, obj_type)
- code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
- else:
- code.putln("%s->obj = NULL;" % view)
-
+ def getbuffer_init(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.put_init_to_py_none("%s->obj" % view, obj_type)
+ code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ else:
+ code.putln("%s->obj = NULL;" % view)
+
def getbuffer_error_cleanup(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.putln("if (%s->obj != NULL) {" % view)
- code.put_gotref("%s->obj" % view)
- code.put_decref_clear("%s->obj" % view, obj_type)
- code.putln("}")
- else:
- code.putln("Py_CLEAR(%s->obj);" % view)
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj != NULL) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+ else:
+ code.putln("Py_CLEAR(%s->obj);" % view)
def getbuffer_normal_cleanup(self, code):
- py_buffer, obj_type = self._get_py_buffer_info()
- view = py_buffer.cname
- if obj_type and obj_type.is_pyobject:
- code.putln("if (%s->obj == Py_None) {" % view)
- code.put_gotref("%s->obj" % view)
- code.put_decref_clear("%s->obj" % view, obj_type)
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj == Py_None) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
@@ -2358,10 +2358,10 @@ class CFuncDefNode(FuncDefNode):
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(
base_type, env, nonempty=2 * (self.body is not None),
- directive_locals=self.directive_locals, visibility=self.visibility)
+ directive_locals=self.directive_locals, visibility=self.visibility)
else:
name_declarator, type = self.declarator.analyse(
- base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
+ base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility)
if not type.is_cfunction:
error(self.pos, "Suite attached to non-function declaration")
# Remember the actual type according to the function header
@@ -2400,7 +2400,7 @@ class CFuncDefNode(FuncDefNode):
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
- if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
+ if type_arg.type.is_buffer or type_arg.type.is_pythran_expr:
if self.type.nogil:
error(formal_arg.pos,
"Buffer may not be acquired without the GIL. Consider using memoryview slices instead.")
@@ -2752,9 +2752,9 @@ class DefNode(FuncDefNode):
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
outer_attrs = ["decorators", "return_type_annotation"]
- is_staticmethod = False
- is_classmethod = False
-
+ is_staticmethod = False
+ is_classmethod = False
+
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
@@ -2797,22 +2797,22 @@ class DefNode(FuncDefNode):
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
- exception_value, exception_check = except_val or (None, False)
-
+ exception_value, exception_check = except_val or (None, False)
+
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
- annotation=formal_arg.annotation,
+ annotation=formal_arg.annotation,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
args=cfunc_args,
has_varargs=False,
exception_value=None,
- exception_check=exception_check,
+ exception_check=exception_check,
nogil=nogil,
with_gil=with_gil,
is_overridable=overridable)
@@ -2830,10 +2830,10 @@ class DefNode(FuncDefNode):
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
-
- if exception_value is None and cfunc_type.exception_value is not None:
- from .ExprNodes import ConstNode
- exception_value = ConstNode(
+
+ if exception_value is None and cfunc_type.exception_value is not None:
+ from .ExprNodes import ConstNode
+ exception_value = ConstNode(
self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
@@ -2898,7 +2898,7 @@ class DefNode(FuncDefNode):
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
- _, return_type = analyse_type_annotation(self.return_type_annotation, env)
+ _, return_type = analyse_type_annotation(self.return_type_annotation, env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
@@ -2926,13 +2926,13 @@ class DefNode(FuncDefNode):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
- # If we hare in pythran mode and we got a buffer supported by
- # Pythran, we change this node to a fused type
- if has_np_pythran(env) and base_type.is_pythran_expr:
- base_type = PyrexTypes.FusedType([
- base_type,
- #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
- base_type.org_buffer])
+ # If we hare in pythran mode and we got a buffer supported by
+ # Pythran, we change this node to a fused type
+ if has_np_pythran(env) and base_type.is_pythran_expr:
+ base_type = PyrexTypes.FusedType([
+ base_type,
+ #PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
+ base_type.org_buffer])
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
@@ -2973,11 +2973,11 @@ class DefNode(FuncDefNode):
error(arg.pos, "Only Python type arguments can have 'or None'")
env.fused_to_specific = f2s
- if has_np_pythran(env):
- self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
- else:
- self.np_args_idx = []
-
+ if has_np_pythran(env):
+ self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
+ else:
+ self.np_args_idx = []
+
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
@@ -3133,7 +3133,7 @@ class DefNode(FuncDefNode):
self.analyse_default_values(env)
self.analyse_annotations(env)
if self.return_type_annotation:
- self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
+ self.return_type_annotation = self.analyse_annotation(env, self.return_type_annotation)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
@@ -3208,10 +3208,10 @@ class DefNode(FuncDefNode):
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
- if arg_code_list:
- arg_code = ', '.join(arg_code_list)
- else:
- arg_code = 'void' # No arguments
+ if arg_code_list:
+ arg_code = ', '.join(arg_code_list)
+ else:
+ arg_code = 'void' # No arguments
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
@@ -3276,8 +3276,8 @@ class DefNodeWrapper(FuncDefNode):
self.signature = target_entry.signature
- self.np_args_idx = self.target.np_args_idx
-
+ self.np_args_idx = self.target.np_args_idx
+
def prepare_argument_coercion(self, env):
# This is only really required for Cython utility code at this time,
# everything else can be done during code generation. But we expand
@@ -3705,8 +3705,8 @@ class DefNodeWrapper(FuncDefNode):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
@@ -3730,12 +3730,12 @@ class DefNodeWrapper(FuncDefNode):
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
- if i != reversed_args[0][0]:
- code.putln('CYTHON_FALLTHROUGH;')
+ if i != reversed_args[0][0]:
+ code.putln('CYTHON_FALLTHROUGH;')
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
- code.putln('CYTHON_FALLTHROUGH;')
+ code.putln('CYTHON_FALLTHROUGH;')
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
@@ -3777,12 +3777,12 @@ class DefNodeWrapper(FuncDefNode):
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
- if arg.type.from_py_function:
+ if arg.type.from_py_function:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
- code.putln(arg.type.from_py_call_code(
- item, arg.entry.cname, arg.pos, code))
+ code.putln(arg.type.from_py_call_code(
+ item, arg.entry.cname, arg.pos, code))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
@@ -3855,7 +3855,7 @@ class DefNodeWrapper(FuncDefNode):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
- code.putln('CYTHON_FALLTHROUGH;')
+ code.putln('CYTHON_FALLTHROUGH;')
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
@@ -3883,8 +3883,8 @@ class DefNodeWrapper(FuncDefNode):
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
- if i != 0:
- code.putln('CYTHON_FALLTHROUGH;')
+ if i != 0:
+ code.putln('CYTHON_FALLTHROUGH;')
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
@@ -3896,12 +3896,12 @@ class DefNodeWrapper(FuncDefNode):
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
- code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
+ code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
@@ -3922,8 +3922,8 @@ class DefNodeWrapper(FuncDefNode):
code.putln('}')
elif arg.kw_only:
code.putln('else {')
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseKeywordRequired", "FunctionArguments.c"))
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
@@ -3987,7 +3987,7 @@ class DefNodeWrapper(FuncDefNode):
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
- code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
@@ -4024,13 +4024,13 @@ class DefNodeWrapper(FuncDefNode):
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
# copied from CoerceFromPyTypeNode
- if new_type.from_py_function:
- code.putln(new_type.from_py_call_code(
- arg.hdr_cname,
- arg.entry.cname,
- arg.pos,
- code,
- ))
+ if new_type.from_py_function:
+ code.putln(new_type.from_py_call_code(
+ arg.hdr_cname,
+ arg.entry.cname,
+ arg.pos,
+ code,
+ ))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
@@ -4071,9 +4071,9 @@ class GeneratorDefNode(DefNode):
is_generator = True
is_coroutine = False
- is_iterable_coroutine = False
- is_asyncgen = False
- gen_type_name = 'Generator'
+ is_iterable_coroutine = False
+ is_asyncgen = False
+ gen_type_name = 'Generator'
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
@@ -4096,10 +4096,10 @@ class GeneratorDefNode(DefNode):
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
- '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
- self.gen_type_name,
- body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
- Naming.cur_scope_cname, name, qualname, module_name,
+ '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
+ self.gen_type_name,
+ body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
+ Naming.cur_scope_cname, name, qualname, module_name,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
@@ -4113,40 +4113,40 @@ class GeneratorDefNode(DefNode):
code.putln('}')
def generate_function_definitions(self, env, code):
- env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c"))
+ env.use_utility_code(UtilityCode.load_cached(self.gen_type_name, "Coroutine.c"))
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class AsyncDefNode(GeneratorDefNode):
- gen_type_name = 'Coroutine'
+ gen_type_name = 'Coroutine'
is_coroutine = True
-class IterableAsyncDefNode(AsyncDefNode):
- gen_type_name = 'IterableCoroutine'
- is_iterable_coroutine = True
-
-
-class AsyncGenNode(AsyncDefNode):
- gen_type_name = 'AsyncGen'
- is_asyncgen = True
-
-
+class IterableAsyncDefNode(AsyncDefNode):
+ gen_type_name = 'IterableCoroutine'
+ is_iterable_coroutine = True
+
+
+class AsyncGenNode(AsyncDefNode):
+ gen_type_name = 'AsyncGen'
+ is_asyncgen = True
+
+
class GeneratorBodyDefNode(DefNode):
# Main code body of a generator implemented as a DefNode.
#
is_generator_body = True
is_inlined = False
- is_async_gen_body = False
+ is_async_gen_body = False
inlined_comprehension_type = None # container type for inlined comprehensions
- def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False):
+ def __init__(self, pos=None, name=None, body=None, is_async_gen_body=False):
super(GeneratorBodyDefNode, self).__init__(
- pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body,
- doc=None, args=[], star_arg=None, starstar_arg=None)
+ pos=pos, body=body, name=name, is_async_gen_body=is_async_gen_body,
+ doc=None, args=[], star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
@@ -4167,10 +4167,10 @@ class GeneratorBodyDefNode(DefNode):
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
- header = "static PyObject *%s(PyObject *%s_obj, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % (
+ header = "static PyObject *%s(PyObject *%s_obj, CYTHON_UNUSED PyThreadState *%s, PyObject *%s)" % (
self.entry.func_cname,
Naming.generator_cname,
- Naming.local_tstate_cname,
+ Naming.local_tstate_cname,
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
@@ -4199,14 +4199,14 @@ class GeneratorBodyDefNode(DefNode):
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
- code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name)
- profile = code.globalstate.directives['profile']
- linetrace = code.globalstate.directives['linetrace']
- if profile or linetrace:
- tempvardecl_code.put_trace_declarations()
- code.funcstate.can_trace = True
- code_object = self.code_object.calculate_result_code(code) if self.code_object else None
- code.put_trace_frame_init(code_object)
+ code.put_setup_refcount_context(self.entry.name or self.entry.qualified_name)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ tempvardecl_code.put_trace_declarations()
+ code.funcstate.can_trace = True
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
@@ -4237,7 +4237,7 @@ class GeneratorBodyDefNode(DefNode):
# ----- Function body
self.generate_function_body(env, code)
# ----- Closure initialization
- if lenv.scope_class.type.scope.var_entries:
+ if lenv.scope_class.type.scope.var_entries:
closure_init_code.putln('%s = %s;' % (
lenv.scope_class.type.declaration_code(Naming.cur_scope_cname),
lenv.scope_class.type.cast_code('%s->closure' %
@@ -4245,9 +4245,9 @@ class GeneratorBodyDefNode(DefNode):
# FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
- if profile or linetrace:
- code.funcstate.can_trace = False
-
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
@@ -4255,13 +4255,13 @@ class GeneratorBodyDefNode(DefNode):
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
- if self.is_async_gen_body:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
- code.putln('PyErr_SetNone(%s);' % (
- '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
+ if self.is_async_gen_body:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.putln('PyErr_SetNone(%s);' % (
+ '__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
# ----- Error cleanup
- if code.label_used(code.error_label):
+ if code.label_used(code.error_label):
if not self.body.is_terminator:
code.put_goto(code.return_label)
code.put_label(code.error_label)
@@ -4270,7 +4270,7 @@ class GeneratorBodyDefNode(DefNode):
if Future.generator_stop in env.global_scope().context.future_directives:
# PEP 479: turn accidental StopIteration exceptions into a RuntimeError
code.globalstate.use_utility_code(UtilityCode.load_cached("pep479", "Coroutine.c"))
- code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body))
+ code.putln("__Pyx_Generator_Replace_StopIteration(%d);" % bool(self.is_async_gen_body))
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
@@ -4283,14 +4283,14 @@ class GeneratorBodyDefNode(DefNode):
code.put_xdecref_clear(Naming.retval_cname, py_object_type)
# For Py3.7, clearing is already done below.
code.putln("#if !CYTHON_USE_EXC_INFO_STACK")
- code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
code.putln("#endif")
code.putln('%s->resume_label = -1;' % Naming.generator_cname)
# clean up as early as possible to help breaking any reference cycles
code.putln('__Pyx_Coroutine_clear((PyObject*)%s);' % Naming.generator_cname)
- if profile or linetrace:
- code.put_trace_return(Naming.retval_cname,
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
@@ -4298,20 +4298,20 @@ class GeneratorBodyDefNode(DefNode):
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
- if profile or linetrace:
- resume_code.put_trace_call(self.entry.qualified_name, self.pos,
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ resume_code.put_trace_call(self.entry.qualified_name, self.pos,
+ nogil=not code.funcstate.gil_owned)
resume_code.putln("switch (%s->resume_label) {" % (
Naming.generator_cname))
-
+
resume_code.putln("case 0: goto %s;" % first_run_label)
for i, label in code.yield_labels:
resume_code.putln("case %d: goto %s;" % (i, label))
resume_code.putln("default: /* CPython raises the right error here */")
- if profile or linetrace:
- resume_code.put_trace_return("Py_None",
- nogil=not code.funcstate.gil_owned)
+ if profile or linetrace:
+ resume_code.put_trace_return("Py_None",
+ nogil=not code.funcstate.gil_owned)
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;")
resume_code.putln("}")
@@ -4321,7 +4321,7 @@ class GeneratorBodyDefNode(DefNode):
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overridden.
+ # is overridden.
#
# py_func
#
@@ -4539,7 +4539,7 @@ class PyClassDefNode(ClassDefNode):
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
- from . import ExprNodes
+ from . import ExprNodes
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
@@ -4651,7 +4651,7 @@ class CClassDefNode(ClassDefNode):
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
- # bases TupleNode Base class(es)
+ # bases TupleNode Base class(es)
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
@@ -4734,34 +4734,34 @@ class CClassDefNode(ClassDefNode):
self.module.has_extern_class = 1
env.add_imported_module(self.module)
- if self.bases.args:
- base = self.bases.args[0]
- base_type = base.analyse_as_type(env)
- if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
- # Use the Python rather than C variant of these types.
- base_type = env.lookup(base_type.sign_and_name()).type
- if base_type is None:
- error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
- elif base_type == PyrexTypes.py_object_type:
- base_class_scope = None
- elif not base_type.is_extension_type and \
- not (base_type.is_builtin_type and base_type.objstruct_cname):
- error(base.pos, "'%s' is not an extension type" % base_type)
- elif not base_type.is_complete():
- error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
- base_type.name, self.class_name))
- elif base_type.scope and base_type.scope.directives and \
- base_type.is_final_type:
- error(base.pos, "Base class '%s' of type '%s' is final" % (
- base_type, self.class_name))
- elif base_type.is_builtin_type and \
- base_type.name in ('tuple', 'str', 'bytes'):
- error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
- % base_type.name)
+ if self.bases.args:
+ base = self.bases.args[0]
+ base_type = base.analyse_as_type(env)
+ if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
+ # Use the Python rather than C variant of these types.
+ base_type = env.lookup(base_type.sign_and_name()).type
+ if base_type is None:
+ error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
+ elif base_type == PyrexTypes.py_object_type:
+ base_class_scope = None
+ elif not base_type.is_extension_type and \
+ not (base_type.is_builtin_type and base_type.objstruct_cname):
+ error(base.pos, "'%s' is not an extension type" % base_type)
+ elif not base_type.is_complete():
+ error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
+ base_type.name, self.class_name))
+ elif base_type.scope and base_type.scope.directives and \
+ base_type.is_final_type:
+ error(base.pos, "Base class '%s' of type '%s' is final" % (
+ base_type, self.class_name))
+ elif base_type.is_builtin_type and \
+ base_type.name in ('tuple', 'str', 'bytes'):
+ error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
+ % base_type.name)
else:
- self.base_type = base_type
- if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
- warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
+ self.base_type = base_type
+ if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
+ warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
@@ -4822,28 +4822,28 @@ class CClassDefNode(ClassDefNode):
else:
scope.implemented = 1
- if len(self.bases.args) > 1:
- if not has_body or self.in_pxd:
- error(self.bases.args[1].pos, "Only declare first base in declaration.")
- # At runtime, we check that the other bases are heap types
- # and that a __dict__ is added if required.
- for other_base in self.bases.args[1:]:
- if other_base.analyse_as_type(env):
- error(other_base.pos, "Only one extension type base class allowed.")
- self.entry.type.early_init = 0
- from . import ExprNodes
- self.type_init_args = ExprNodes.TupleNode(
- self.pos,
- args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
- self.bases,
- ExprNodes.DictNode(self.pos, key_value_pairs=[])])
- elif self.base_type:
- self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
- self.type_init_args = None
- else:
- self.entry.type.early_init = 1
- self.type_init_args = None
-
+ if len(self.bases.args) > 1:
+ if not has_body or self.in_pxd:
+ error(self.bases.args[1].pos, "Only declare first base in declaration.")
+ # At runtime, we check that the other bases are heap types
+ # and that a __dict__ is added if required.
+ for other_base in self.bases.args[1:]:
+ if other_base.analyse_as_type(env):
+ error(other_base.pos, "Only one extension type base class allowed.")
+ self.entry.type.early_init = 0
+ from . import ExprNodes
+ self.type_init_args = ExprNodes.TupleNode(
+ self.pos,
+ args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
+ self.bases,
+ ExprNodes.DictNode(self.pos, key_value_pairs=[])])
+ elif self.base_type:
+ self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
+ self.type_init_args = None
+ else:
+ self.entry.type.early_init = 1
+ self.type_init_args = None
+
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
@@ -4853,8 +4853,8 @@ class CClassDefNode(ClassDefNode):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
- if self.type_init_args:
- self.type_init_args.analyse_expressions(env)
+ if self.type_init_args:
+ self.type_init_args.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
@@ -4868,175 +4868,175 @@ class CClassDefNode(ClassDefNode):
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
- if not self.entry.type.early_init:
- if self.type_init_args:
- self.type_init_args.generate_evaluation_code(code)
- bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
- first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
- # Let Python do the base types compatibility checking.
- trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
- code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
- trial_type, self.type_init_args.result()))
- code.putln(code.error_goto_if_null(trial_type, self.pos))
- code.put_gotref(trial_type)
- code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
- trial_type, first_base))
- code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
- code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
- trial_type, first_base))
- code.putln(code.error_goto(self.pos))
- code.putln("}")
- code.funcstate.release_temp(trial_type)
- code.put_incref(bases, PyrexTypes.py_object_type)
- code.put_giveref(bases)
- code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
- code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
- self.type_init_args.generate_disposal_code(code)
- self.type_init_args.free_temps(code)
-
- self.generate_type_ready_code(self.entry, code, True)
-
- # Also called from ModuleNode for early init types.
- @staticmethod
- def generate_type_ready_code(entry, code, heap_type_bases=False):
- # Generate a call to PyType_Ready for an extension
- # type defined in this module.
- type = entry.type
- typeobj_cname = type.typeobj_cname
- scope = type.scope
- if not scope: # could be None if there was an error
- return
- if entry.visibility != 'extern':
- for slot in TypeSlots.slot_table:
- slot.generate_dynamic_init_code(scope, code)
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
- readyfunc = "__Pyx_PyType_Ready"
- else:
- readyfunc = "PyType_Ready"
- code.putln(
- "if (%s(&%s) < 0) %s" % (
- readyfunc,
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Don't inherit tp_print from builtin types, restoring the
- # behavior of using tp_repr or tp_str instead.
+ if not self.entry.type.early_init:
+ if self.type_init_args:
+ self.type_init_args.generate_evaluation_code(code)
+ bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
+ # Let Python do the base types compatibility checking.
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
+ code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type, self.type_init_args.result()))
+ code.putln(code.error_goto_if_null(trial_type, self.pos))
+ code.put_gotref(trial_type)
+ code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ trial_type, first_base))
+ code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
+ code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
+ trial_type, first_base))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(trial_type)
+ code.put_incref(bases, PyrexTypes.py_object_type)
+ code.put_giveref(bases)
+ code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+ code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ self.type_init_args.generate_disposal_code(code)
+ self.type_init_args.free_temps(code)
+
+ self.generate_type_ready_code(self.entry, code, True)
+
+ # Also called from ModuleNode for early init types.
+ @staticmethod
+ def generate_type_ready_code(entry, code, heap_type_bases=False):
+ # Generate a call to PyType_Ready for an extension
+ # type defined in this module.
+ type = entry.type
+ typeobj_cname = type.typeobj_cname
+ scope = type.scope
+ if not scope: # could be None if there was an error
+ return
+ if entry.visibility != 'extern':
+ for slot in TypeSlots.slot_table:
+ slot.generate_dynamic_init_code(scope, code)
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ readyfunc = "__Pyx_PyType_Ready"
+ else:
+ readyfunc = "PyType_Ready"
+ code.putln(
+ "if (%s(&%s) < 0) %s" % (
+ readyfunc,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Don't inherit tp_print from builtin types, restoring the
+ # behavior of using tp_repr or tp_str instead.
# ("tp_print" was renamed to "tp_vectorcall_offset" in Py3.8b1)
code.putln("#if PY_VERSION_HEX < 0x030800B1")
- code.putln("%s.tp_print = 0;" % typeobj_cname)
+ code.putln("%s.tp_print = 0;" % typeobj_cname)
code.putln("#endif")
-
- # Use specialised attribute lookup for types with generic lookup but no instance dict.
- getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
- dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
- if getattr_slot_func == '0' and dictoffset_slot_func == '0':
- if type.is_final_type:
- py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
- utility_func = "PyObject_GenericGetAttrNoDict"
- else:
- py_cfunc = "__Pyx_PyObject_GenericGetAttr"
- utility_func = "PyObject_GenericGetAttr"
- code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
-
- code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
- " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
- typeobj_cname, typeobj_cname))
- code.putln("%s.tp_getattro = %s;" % (
- typeobj_cname, py_cfunc))
- code.putln("}")
-
- # Fix special method docstrings. This is a bit of a hack, but
- # unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
- for func in entry.type.scope.pyfunc_entries:
- is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
- if (func.is_special and Options.docstrings and
- func.wrapperbase_cname and not is_buffer):
- slot = TypeSlots.method_name_to_slot.get(func.name)
- preprocessor_guard = slot.preprocessor_guard_code() if slot else None
- if preprocessor_guard:
- code.putln(preprocessor_guard)
- code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
- code.putln("{")
- code.putln(
- 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
- typeobj_cname,
- func.name,
- code.error_goto_if_null('wrapper', entry.pos)))
- code.putln(
- "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
- code.putln(
- "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
- func.wrapperbase_cname))
- code.putln(
- "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
- code.putln(
- "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
- func.wrapperbase_cname))
- code.putln("}")
- code.putln("}")
- code.putln('#endif')
- if preprocessor_guard:
- code.putln('#endif')
- if type.vtable_cname:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
- code.putln(
- "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
- typeobj_cname,
- type.vtabptr_cname,
- code.error_goto(entry.pos)))
- if heap_type_bases:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
- code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
- typeobj_cname,
- code.error_goto(entry.pos)))
+
+ # Use specialised attribute lookup for types with generic lookup but no instance dict.
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
+ dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
+ if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ if type.is_final_type:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
+ utility_func = "PyObject_GenericGetAttrNoDict"
+ else:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ utility_func = "PyObject_GenericGetAttr"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
+
+ code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
+ " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeobj_cname, typeobj_cname))
+ code.putln("%s.tp_getattro = %s;" % (
+ typeobj_cname, py_cfunc))
+ code.putln("}")
+
+ # Fix special method docstrings. This is a bit of a hack, but
+ # unless we let PyType_Ready create the slot wrappers we have
+ # a significant performance hit. (See trac #561.)
+ for func in entry.type.scope.pyfunc_entries:
+ is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
+ if (func.is_special and Options.docstrings and
+ func.wrapperbase_cname and not is_buffer):
+ slot = TypeSlots.method_name_to_slot.get(func.name)
+ preprocessor_guard = slot.preprocessor_guard_code() if slot else None
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("{")
+ code.putln(
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
+ typeobj_cname,
+ func.name,
+ code.error_goto_if_null('wrapper', entry.pos)))
+ code.putln(
+ "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ code.putln(
+ "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
+ func.wrapperbase_cname))
+ code.putln(
+ "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
+ code.putln(
+ "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
+ func.wrapperbase_cname))
+ code.putln("}")
+ code.putln("}")
+ code.putln('#endif')
+ if preprocessor_guard:
+ code.putln('#endif')
+ if type.vtable_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
+ code.putln(
+ "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
+ typeobj_cname,
+ type.vtabptr_cname,
+ code.error_goto(entry.pos)))
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
if not type.scope.is_internal and not type.scope.directives.get('internal'):
- # scope.is_internal is set for types defined by
- # Cython (such as closures), the 'internal'
- # directive is set by users
- code.putln(
+ # scope.is_internal is set for types defined by
+ # Cython (such as closures), the 'internal'
+ # directive is set by users
+ code.putln(
'if (PyObject_SetAttr(%s, %s, (PyObject *)&%s) < 0) %s' % (
- Naming.module_cname,
+ Naming.module_cname,
code.intern_identifier(scope.class_name),
- typeobj_cname,
- code.error_goto(entry.pos)))
- weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
- if weakref_entry:
- if weakref_entry.type is py_object_type:
- tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
- if type.typedef_flag:
- objstruct = type.objstruct_cname
- else:
- objstruct = "struct %s" % type.objstruct_cname
- code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
- tp_weaklistoffset,
- tp_weaklistoffset,
- objstruct,
- weakref_entry.cname))
- else:
- error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
- if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
- # Unfortunately, we cannot reliably detect whether a
- # superclass defined __reduce__ at compile time, so we must
- # do so at runtime.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Generate code to initialise the typeptr of an extension
- # type defined in this module to point to its type object.
- if type.typeobj_cname:
- code.putln(
- "%s = &%s;" % (
- type.typeptr_cname, type.typeobj_cname))
-
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_entry:
+ if weakref_entry.type is py_object_type:
+ tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
+ tp_weaklistoffset,
+ tp_weaklistoffset,
+ objstruct,
+ weakref_entry.cname))
+ else:
+ error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+ if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
+ # Unfortunately, we cannot reliably detect whether a
+ # superclass defined __reduce__ at compile time, so we must
+ # do so at runtime.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
+ code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Generate code to initialise the typeptr of an extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.typeptr_cname, type.typeobj_cname))
+
def annotate(self, code):
- if self.type_init_args:
- self.type_init_args.annotate(code)
+ if self.type_init_args:
+ self.type_init_args.annotate(code)
if self.body:
self.body.annotate(code)
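
The re-indented block above is `generate_type_ready_code`, the per-type initialisation driver: it fills dynamic slots, calls `PyType_Ready` (or `__Pyx_PyType_Ready` when heap-type bases need extra handling), zeroes `tp_print` on CPython versions before 3.8b1, installs a specialised `tp_getattro` for dict-less types, patches special-method docstrings, sets the vtable, publishes the type on the module, and wires up `__weakref__` and `__reduce_cython__`. Throughout, the recurring idiom is emitting a guarded C call followed by `code.error_goto(...)`. A minimal Python sketch of that emission idiom only, with a toy `Emitter` standing in for the real code-writer object (all names below are invented for the sketch):

    # Toy stand-in for the pattern: putln('if (call < 0) ' + error_goto(pos)).
    class Emitter:
        def __init__(self):
            self.lines = []
        def putln(self, line):
            self.lines.append(line)
        def error_goto(self, label="__pyx_L1_error"):
            # The real error_goto() also records the source position for tracebacks.
            return "goto %s;" % label

    code = Emitter()
    typeobj_cname = "__pyx_type_9mymod_MyType"   # hypothetical C name
    code.putln("if (PyType_Ready(&%s) < 0) %s" % (typeobj_cname, code.error_goto()))
    print("\n".join(code.lines))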
@@ -5115,13 +5115,13 @@ class ExprStatNode(StatNode):
def analyse_declarations(self, env):
from . import ExprNodes
- expr = self.expr
- if isinstance(expr, ExprNodes.GeneralCallNode):
- func = expr.function.as_cython_attribute()
+ expr = self.expr
+ if isinstance(expr, ExprNodes.GeneralCallNode):
+ func = expr.function.as_cython_attribute()
if func == u'declare':
- args, kwds = expr.explicit_args_kwds()
+ args, kwds = expr.explicit_args_kwds()
if len(args):
- error(expr.pos, "Variable names must be specified.")
+ error(expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
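
This hunk is the `cython.declare(...)` handling in `ExprStatNode.analyse_declarations`: when the statement is a bare call to `cython.declare`, every variable must be named via a keyword argument; positional arguments produce the "Variable names must be specified." error, and once the declarations are registered the whole statement is rewritten into a `PassStatNode`. Illustrative pure-Python-mode usage (not from this diff):

    import cython

    n = cython.declare(cython.int, 0)                 # assignment form, handled elsewhere
    cython.declare(x=cython.double, y=cython.long)    # bare call: keywords name the declared variables
    # cython.declare(cython.int)                      # bare call with a positional arg -> compile error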
@@ -5129,20 +5129,20 @@ class ExprStatNode(StatNode):
else:
env.declare_var(var.value, type, var.pos, is_cdef=True)
self.__class__ = PassStatNode
- elif getattr(expr, 'annotation', None) is not None:
- if expr.is_name:
- # non-code variable annotation, e.g. "name: type"
- expr.declare_from_annotation(env)
- self.__class__ = PassStatNode
- elif expr.is_attribute or expr.is_subscript:
- # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
- self.__class__ = PassStatNode
+ elif getattr(expr, 'annotation', None) is not None:
+ if expr.is_name:
+ # non-code variable annotation, e.g. "name: type"
+ expr.declare_from_annotation(env)
+ self.__class__ = PassStatNode
+ elif expr.is_attribute or expr.is_subscript:
+ # unused expression with annotation, e.g. "a[0]: type" or "a.xyz : type"
+ self.__class__ = PassStatNode
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
- # Repeat in case of node replacement.
- self.expr.result_is_used = False # hint that .result() may safely be left empty
+ # Repeat in case of node replacement.
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
return self
def nogil_check(self, env):
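
The annotation branch just above gives bare annotations a compile-time meaning: `name: type` on a plain name declares the variable and the statement becomes a `PassStatNode`, while an annotated attribute or subscript is treated as an unused expression and dropped. In `analyse_expressions`, `result_is_used` is set both before and after analysis because the node may be replaced. For example (pure Python mode, illustrative):

    import cython

    counter: cython.int        # bare annotation on a name: declares a C int, statement becomes a no-op
    # data[0]: cython.int      # annotation on a subscript: ignored, nothing is declared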
@@ -5153,13 +5153,13 @@ class ExprStatNode(StatNode):
def generate_execution_code(self, code):
code.mark_pos(self.pos)
- self.expr.result_is_used = False # hint that .result() may safely be left empty
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
- result = self.expr.result()
- if not self.expr.type.is_void:
- result = "(void)(%s)" % result
- code.putln("%s;" % result)
+ result = self.expr.result()
+ if not self.expr.type.is_void:
+ result = "(void)(%s)" % result
+ code.putln("%s;" % result)
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
@@ -5873,9 +5873,9 @@ class DelStatNode(StatNode):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
- arg.generate_evaluation_code(code)
+ arg.generate_evaluation_code(code)
code.putln("delete %s;" % arg.result())
- arg.generate_disposal_code(code)
+ arg.generate_disposal_code(code)
arg.free_temps(code)
# else error reported earlier
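
In the `DelStatNode` hunk, `del` applied to a pointer to a C++ class evaluates the argument, emits a C++ `delete ...;`, and then disposes of the expression's temporaries; Python objects take the ordinary deletion path above it. A condensed Python paraphrase of just that branch (the function name is invented; the real logic lives inline in `generate_execution_code`):

    def emit_cpp_delete(arg, code):
        # Mirrors the branch above: only a pointer to a C++ class is freed with 'delete'.
        if arg.type.is_ptr and arg.type.base_type.is_cpp_class:
            arg.generate_evaluation_code(code)
            code.putln("delete %s;" % arg.result())
            arg.generate_disposal_code(code)
            arg.free_temps(code)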
@@ -5905,7 +5905,7 @@ class IndirectionNode(StatListNode):
def __init__(self, stats):
super(IndirectionNode, self).__init__(stats[0].pos, stats=stats)
-
+
class BreakStatNode(StatNode):
child_attrs = []
@@ -5944,12 +5944,12 @@ class ReturnStatNode(StatNode):
# value ExprNode or None
# return_type PyrexType
# in_generator return inside of generator => raise StopIteration
- # in_async_gen return inside of async generator
+ # in_async_gen return inside of async generator
child_attrs = ["value"]
is_terminator = True
in_generator = False
- in_async_gen = False
+ in_async_gen = False
# Whether we are in a parallel section
in_parallel = False
@@ -5961,8 +5961,8 @@ class ReturnStatNode(StatNode):
error(self.pos, "Return not inside a function body")
return self
if self.value:
- if self.in_async_gen:
- error(self.pos, "Return with value in async generator")
+ if self.in_async_gen:
+ error(self.pos, "Return with value in async generator")
self.value = self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos, "Return with value in void function")
@@ -5986,23 +5986,23 @@ class ReturnStatNode(StatNode):
if not self.return_type:
# error reported earlier
return
-
- value = self.value
+
+ value = self.value
if self.return_type.is_pyobject:
- code.put_xdecref(Naming.retval_cname, self.return_type)
- if value and value.is_none:
- # Use specialised default handling for "return None".
- value = None
+ code.put_xdecref(Naming.retval_cname, self.return_type)
+ if value and value.is_none:
+ # Use specialised default handling for "return None".
+ value = None
- if value:
- value.generate_evaluation_code(code)
+ if value:
+ value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
- lhs_pos=value.pos,
- rhs=value,
+ lhs_pos=value.pos,
+ rhs=value,
code=code,
have_gil=self.in_nogil_context)
value.generate_post_assignment_code(code)
@@ -6012,22 +6012,22 @@ class ReturnStatNode(StatNode):
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
- value.py_result()))
- value.generate_disposal_code(code)
+ value.py_result()))
+ value.generate_disposal_code(code)
else:
- value.make_owned_reference(code)
+ value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
- value.result_as(self.return_type)))
+ value.result_as(self.return_type)))
value.generate_post_assignment_code(code)
- value.free_temps(code)
+ value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
- if self.in_async_gen:
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
- code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
+ if self.in_async_gen:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
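
Taken together, the `ReturnStatNode` hunks implement three return flavours: `return value` in an async generator is rejected at analysis time, `return value` in a plain generator is routed through `__Pyx_ReturnWithStopIteration()` so the value rides on the `StopIteration`, and a bare `return` in an async generator sets `StopAsyncIteration`. The generator case matches plain Python semantics:

    def gen():
        yield 1
        return "done"              # carried on StopIteration.value

    it = gen()
    next(it)
    try:
        next(it)
    except StopIteration as exc:
        assert exc.value == "done"

    async def agen():
        yield 1
        return                     # bare return only; 'return value' is rejected here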
@@ -6103,8 +6103,8 @@ class RaiseStatNode(StatNode):
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
- if self.exc_type.is_name:
- code.globalstate.use_entry_utility_code(self.exc_type.entry)
+ if self.exc_type.is_name:
+ code.globalstate.use_entry_utility_code(self.exc_type.entry)
else:
type_code = "0"
if self.exc_value:
@@ -6272,13 +6272,13 @@ class IfStatNode(StatNode):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
- if self.else_clause:
- # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
- self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
- else:
+ if self.else_clause:
+ # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
+ self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
+ else:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
- self._set_branch_hint(if_clause, if_clause.body)
+ self._set_branch_hint(if_clause, if_clause.body)
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
@@ -6287,21 +6287,21 @@ class IfStatNode(StatNode):
code.putln("}")
code.put_label(end_label)
- def _set_branch_hint(self, clause, statements_node, inverse=False):
- if not statements_node.is_terminator:
- return
- if not isinstance(statements_node, StatListNode) or not statements_node.stats:
- return
- # Anything that unconditionally raises exceptions should be considered unlikely.
- if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
- if len(statements_node.stats) > 1:
- # Allow simple statements before the 'raise', but no conditions, loops, etc.
- non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
- for node in statements_node.stats[:-1]:
- if not isinstance(node, non_branch_nodes):
- return
- clause.branch_hint = 'likely' if inverse else 'unlikely'
-
+ def _set_branch_hint(self, clause, statements_node, inverse=False):
+ if not statements_node.is_terminator:
+ return
+ if not isinstance(statements_node, StatListNode) or not statements_node.stats:
+ return
+ # Anything that unconditionally raises exceptions should be considered unlikely.
+ if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
+ if len(statements_node.stats) > 1:
+ # Allow simple statements before the 'raise', but no conditions, loops, etc.
+ non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
+ for node in statements_node.stats[:-1]:
+ if not isinstance(node, non_branch_nodes):
+ return
+ clause.branch_hint = 'likely' if inverse else 'unlikely'
+
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
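
`_set_branch_hint` marks an if-clause as `unlikely` when its body is a terminator ending in a `raise` (a few simple statements before the raise are tolerated, but no nested control flow), and, via `inverse=True`, marks the last clause `likely` when only the `else` branch raises; `IfClauseNode` then wraps the C condition in `likely(...)`/`unlikely(...)`. A Python function whose guard would pick up the `unlikely` hint, for illustration:

    def checked_div(a: int, b: int) -> float:
        if b == 0:                                  # body unconditionally raises -> hinted unlikely
            raise ZeroDivisionError("b must be non-zero")
        return a / b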
@@ -6322,7 +6322,7 @@ class IfClauseNode(Node):
# body StatNode
child_attrs = ["condition", "body"]
- branch_hint = None
+ branch_hint = None
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
@@ -6335,10 +6335,10 @@ class IfClauseNode(Node):
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
- condition = self.condition.result()
- if self.branch_hint:
- condition = '%s(%s)' % (self.branch_hint, condition)
- code.putln("if (%s) {" % condition)
+ condition = self.condition.result()
+ if self.branch_hint:
+ condition = '%s(%s)' % (self.branch_hint, condition)
+ code.putln("if (%s) {" % condition)
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
@@ -6595,66 +6595,66 @@ class DictIterationNextNode(Node):
var.release(code)
-class SetIterationNextNode(Node):
- # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
- # and checking the set size for changes. Created in Optimize.py.
- child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
- 'coerced_value_var', 'value_target', 'is_set_flag']
-
- coerced_value_var = value_ref = None
-
- def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
- Node.__init__(
- self, set_obj.pos,
- set_obj=set_obj,
- expected_size=expected_size,
- pos_index_var=pos_index_var,
- value_target=value_target,
- is_set_flag=is_set_flag,
- is_temp=True,
- type=PyrexTypes.c_bint_type)
-
- def analyse_expressions(self, env):
- from . import ExprNodes
- self.set_obj = self.set_obj.analyse_types(env)
- self.expected_size = self.expected_size.analyse_types(env)
- self.pos_index_var = self.pos_index_var.analyse_types(env)
- self.value_target = self.value_target.analyse_target_types(env)
- self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
- self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
- self.is_set_flag = self.is_set_flag.analyse_types(env)
- return self
-
- def generate_function_definitions(self, env, code):
- self.set_obj.generate_function_definitions(env, code)
-
- def generate_execution_code(self, code):
- code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
- self.set_obj.generate_evaluation_code(code)
-
- value_ref = self.value_ref
- value_ref.allocate(code)
-
- result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
- code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
- result_temp,
- self.set_obj.py_result(),
- self.expected_size.result(),
- self.pos_index_var.result(),
- value_ref.result(),
- self.is_set_flag.result()
- ))
- code.putln("if (unlikely(%s == 0)) break;" % result_temp)
- code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
- code.funcstate.release_temp(result_temp)
-
- # evaluate all coercions before the assignments
- code.put_gotref(value_ref.result())
- self.coerced_value_var.generate_evaluation_code(code)
- self.value_target.generate_assignment_code(self.coerced_value_var, code)
- value_ref.release(code)
-
-
+class SetIterationNextNode(Node):
+ # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
+ # and checking the set size for changes. Created in Optimize.py.
+ child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
+ 'coerced_value_var', 'value_target', 'is_set_flag']
+
+ coerced_value_var = value_ref = None
+
+ def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
+ Node.__init__(
+ self, set_obj.pos,
+ set_obj=set_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ value_target=value_target,
+ is_set_flag=is_set_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.set_obj = self.set_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ self.is_set_flag = self.is_set_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.set_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
+ self.set_obj.generate_evaluation_code(code)
+
+ value_ref = self.value_ref
+ value_ref.allocate(code)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
+ result_temp,
+ self.set_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ value_ref.result(),
+ self.is_set_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ code.put_gotref(value_ref.result())
+ self.coerced_value_var.generate_evaluation_code(code)
+ self.value_target.generate_assignment_code(self.coerced_value_var, code)
+ value_ref.release(code)
+
+
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
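
`SetIterationNextNode`, restored above, is the set analogue of the dict iteration helper: Optimize.py creates it to rewrite iteration over a known set into a `while` loop around `__Pyx_set_iter_next()`, passing the expected size and a position index so that size changes during iteration can be detected, and coercing each retrieved value to the loop target's type before the assignment. The kind of loop this targets (illustrative):

    def total(values: set) -> int:
        s = 0
        for v in values:     # candidate for the set-iteration rewrite when 'values' is typed as a set
            s += v
        return s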
@@ -6780,11 +6780,11 @@ class AsyncForStatNode(_ForInStatNode):
is_async = True
- def __init__(self, pos, **kw):
+ def __init__(self, pos, **kw):
assert 'item' not in kw
from . import ExprNodes
# AwaitExprNodes must appear before running MarkClosureVisitor
- kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
+ kw['item'] = ExprNodes.AwaitIterNextExprNode(kw['iterator'].pos, arg=None)
_ForInStatNode.__init__(self, pos, **kw)
def _create_item_node(self):
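
`AsyncForStatNode` injects an `AwaitIterNextExprNode` as the loop's `item` before `MarkClosureVisitor` runs, since await expressions must already be present when closures and coroutines are marked. The construct being compiled is the ordinary one:

    async def consume(aiter):
        async for item in aiter:     # each iteration awaits the iterator's __anext__()
            print(item)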
@@ -6841,27 +6841,27 @@ class ForFromStatNode(LoopNode, StatNode):
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
- self.set_up_loop(env)
- target_type = self.target.type
- if not (target_type.is_pyobject or target_type.is_numeric):
- error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
-
- self.body = self.body.analyse_expressions(env)
- if self.else_clause:
- self.else_clause = self.else_clause.analyse_expressions(env)
- return self
-
- def set_up_loop(self, env):
- from . import ExprNodes
-
- target_type = self.target.type
- if target_type.is_numeric:
- loop_type = target_type
+ self.set_up_loop(env)
+ target_type = self.target.type
+ if not (target_type.is_pyobject or target_type.is_numeric):
+ error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
+
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def set_up_loop(self, env):
+ from . import ExprNodes
+
+ target_type = self.target.type
+ if target_type.is_numeric:
+ loop_type = target_type
else:
- if target_type.is_enum:
- warning(self.target.pos,
- "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
- loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
+ if target_type.is_enum:
+ warning(self.target.pos,
+ "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
+ loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
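
`set_up_loop` chooses the C type of the `for ... from` loop counter: a numeric target keeps its type, a Python-object target falls back to `long`, any other target (including enums, which now draw a fragility warning) falls back to `int`, and the choice is then widened against C-typed bounds. A rough Python paraphrase of that selection (parameter names are invented; `widest_numeric_type` stands in for `PyrexTypes.widest_numeric_type`):

    def pick_loop_type(target, bound1, bound2, c_long, c_int, widest_numeric_type):
        # Paraphrase of ForFromStatNode.set_up_loop's loop-variable type selection.
        loop_type = target if target.is_numeric else (c_long if target.is_pyobject else c_int)
        if not bound1.is_pyobject:
            loop_type = widest_numeric_type(loop_type, bound1)
        if not bound2.is_pyobject:
            loop_type = widest_numeric_type(loop_type, bound2)
        return loop_type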
@@ -6877,7 +6877,7 @@ class ForFromStatNode(LoopNode, StatNode):
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
- if target_type.is_numeric or target_type.is_enum:
+ if target_type.is_numeric or target_type.is_enum:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
@@ -6887,7 +6887,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
- self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
+ self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
@@ -6899,23 +6899,23 @@ class ForFromStatNode(LoopNode, StatNode):
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
- incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
- else:
- step = '1'
-
+ incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
+ else:
+ step = '1'
+
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
-
- loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
-
- if from_range and not self.is_py_target:
- loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
+
+ loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
+
+ if from_range and not self.is_py_target:
+ loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
else:
loopvar_name = self.loopvar_node.result()
- if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
+ if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
@@ -6929,18 +6929,18 @@ class ForFromStatNode(LoopNode, StatNode):
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
-
- coerced_loopvar_node = self.py_loopvar_node
- if coerced_loopvar_node is None and from_range:
- coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
- if coerced_loopvar_node is not None:
- coerced_loopvar_node.generate_evaluation_code(code)
- self.target.generate_assignment_code(coerced_loopvar_node, code)
-
+
+ coerced_loopvar_node = self.py_loopvar_node
+ if coerced_loopvar_node is None and from_range:
+ coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
+ if coerced_loopvar_node is not None:
+ coerced_loopvar_node.generate_evaluation_code(code)
+ self.target.generate_assignment_code(coerced_loopvar_node, code)
+
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
-
- if not from_range and self.py_loopvar_node:
+
+ if not from_range and self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
@@ -6972,17 +6972,17 @@ class ForFromStatNode(LoopNode, StatNode):
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
-
+
code.putln("}")
-
- if not from_range and self.py_loopvar_node:
+
+ if not from_range and self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
- if from_range and not self.is_py_target:
+ if from_range and not self.is_py_target:
code.funcstate.release_temp(loopvar_name)
-
+
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
@@ -7175,7 +7175,7 @@ class TryExceptStatNode(StatNode):
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
- in_generator = False
+ in_generator = False
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
@@ -7214,8 +7214,8 @@ class TryExceptStatNode(StatNode):
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
- try_break_label = code.new_label('try_break') if old_break_label else None
- try_continue_label = code.new_label('try_continue') if old_continue_label else None
+ try_break_label = code.new_label('try_break') if old_break_label else None
+ try_continue_label = code.new_label('try_continue') if old_continue_label else None
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
@@ -7236,9 +7236,9 @@ class TryExceptStatNode(StatNode):
if can_raise:
# inject code before the try block to save away the exception state
code.globalstate.use_utility_code(reset_exception_utility_code)
- if not self.in_generator:
- save_exc.putln("__Pyx_PyThreadState_declare")
- save_exc.putln("__Pyx_PyThreadState_assign")
+ if not self.in_generator:
+ save_exc.putln("__Pyx_PyThreadState_declare")
+ save_exc.putln("__Pyx_PyThreadState_assign")
save_exc.putln("__Pyx_ExceptionSave(%s);" % (
', '.join(['&%s' % var for var in exc_save_vars])))
for var in exc_save_vars:
@@ -7252,8 +7252,8 @@ class TryExceptStatNode(StatNode):
else:
# try block cannot raise exceptions, but we had to allocate the temps above,
# so just keep the C compiler from complaining about them being unused
- mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
- save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
+ mark_vars_used = ["(void)%s;" % var for var in exc_save_vars]
+ save_exc.putln("%s /* mark used */" % ' '.join(mark_vars_used))
def restore_saved_exception():
pass
@@ -7279,14 +7279,14 @@ class TryExceptStatNode(StatNode):
code.put_label(our_error_label)
for temp_name, temp_type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, temp_type)
-
- outer_except = code.funcstate.current_except
- # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
- code.funcstate.current_except = self
+
+ outer_except = code.funcstate.current_except
+ # Currently points to self, but the ExceptClauseNode would also be ok. Change if needed.
+ code.funcstate.current_except = self
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
- code.funcstate.current_except = outer_except
-
+ code.funcstate.current_except = outer_except
+
if not self.has_default_clause:
code.put_goto(except_error_label)
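
The try/except hunks sketch the generated handler's frame: `try_break`/`try_continue` labels are only created when an enclosing loop provides break/continue targets, the incoming exception state is saved with `__Pyx_ExceptionSave` before the body runs (with the `__Pyx_PyThreadState_declare`/`__Pyx_PyThreadState_assign` pair emitted only outside generators), and `code.funcstate.current_except` is pointed at the node while the handlers are generated. The control flow this supports:

    def first_int(items):
        for item in items:
            try:
                return int(item)
            except (TypeError, ValueError):
                continue          # leaves the try block via the generated 'try_continue' label
        return None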
@@ -7383,42 +7383,42 @@ class ExceptClauseNode(Node):
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
-
+
if self.pattern:
- has_non_literals = not all(
- pattern.is_literal or pattern.is_simple() and not pattern.is_temp
- for pattern in self.pattern)
-
- if has_non_literals:
- # For non-trivial exception check expressions, hide the live exception from C-API calls.
- exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
- for _ in range(3)]
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
- code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
- code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
- exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
- else:
- exc_vars = ()
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
- exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
-
+ has_non_literals = not all(
+ pattern.is_literal or pattern.is_simple() and not pattern.is_temp
+ for pattern in self.pattern)
+
+ if has_non_literals:
+ # For non-trivial exception check expressions, hide the live exception from C-API calls.
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
+ code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ else:
+ exc_vars = ()
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
- exc_tests.append(exc_test_func % pattern.py_result())
+ exc_tests.append(exc_test_func % pattern.py_result())
- match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
- code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
+ match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
-
- if has_non_literals:
- code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
- for temp in exc_vars:
- code.funcstate.release_temp(temp)
-
+
+ if has_non_literals:
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
code.putln(
"if (%s) {" %
match_flag)
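
`generate_handling_code` splits exception patterns into two shapes: literal or simple, non-temporary patterns are tested directly with `__Pyx_PyErr_ExceptionMatches`, while anything that needs real evaluation first stashes the live exception with `__Pyx_ErrFetch`, matches against it with `__Pyx_PyErr_GivenExceptionMatches`, and restores it with `__Pyx_ErrRestore`, so evaluating the pattern cannot clobber the exception being matched. Both shapes in user code (illustrative):

    def retryable_errors():
        return (ConnectionError, TimeoutError)

    def fetch(get):
        try:
            return get()
        except retryable_errors():   # pattern is a call: evaluated while the live exception is stashed
            return None
        except ValueError:           # simple pattern: matched directly
            raise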
@@ -7437,7 +7437,7 @@ class ExceptClauseNode(Node):
code.putln("}")
return
- exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
@@ -7447,8 +7447,8 @@ class ExceptClauseNode(Node):
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (
exc_args, code.error_goto(self.pos)))
- for var in exc_vars:
- code.put_gotref(var)
+ for var in exc_vars:
+ code.put_gotref(var)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
@@ -7465,7 +7465,7 @@ class ExceptClauseNode(Node):
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
-
+
if not self.body.is_terminator:
for var in exc_vars:
# FIXME: XDECREF() is needed to allow re-raising (which clears the exc_vars),
@@ -7509,7 +7509,7 @@ class TryFinallyStatNode(StatNode):
# body StatNode
# finally_clause StatNode
# finally_except_clause deep-copy of finally_clause for exception case
- # in_generator inside of generator => must store away current exception also in return case
+ # in_generator inside of generator => must store away current exception also in return case
#
# Each of the continue, break, return and error gotos runs
# into its own deep-copy of the finally block code.
@@ -7527,7 +7527,7 @@ class TryFinallyStatNode(StatNode):
finally_except_clause = None
is_try_finally_in_nogil = False
- in_generator = False
+ in_generator = False
@staticmethod
def create_analysed(pos, env, body, finally_clause):
@@ -7599,10 +7599,10 @@ class TryFinallyStatNode(StatNode):
code.putln('}')
if preserve_error:
- code.put_label(new_error_label)
+ code.put_label(new_error_label)
code.putln('/*exception exit:*/{')
- if not self.in_generator:
- code.putln("__Pyx_PyThreadState_declare")
+ if not self.in_generator:
+ code.putln("__Pyx_PyThreadState_declare")
if self.is_try_finally_in_nogil:
code.declare_gilstate()
if needs_success_cleanup:
@@ -7650,47 +7650,47 @@ class TryFinallyStatNode(StatNode):
code.set_all_labels(old_labels)
return_label = code.return_label
- exc_vars = ()
-
+ exc_vars = ()
+
for i, (new_label, old_label) in enumerate(zip(new_labels, old_labels)):
if not code.label_used(new_label):
continue
if new_label == new_error_label and preserve_error:
continue # handled above
- code.putln('%s: {' % new_label)
+ code.putln('%s: {' % new_label)
ret_temp = None
- if old_label == return_label:
- # return actually raises an (uncatchable) exception in generators that we must preserve
- if self.in_generator:
- exc_vars = tuple([
- code.funcstate.allocate_temp(py_object_type, manage_ref=False)
- for _ in range(6)])
- self.put_error_catcher(code, [], exc_vars)
- if not self.finally_clause.is_terminator:
- # store away return value for later reuse
- if (self.func_return_type and
- not self.is_try_finally_in_nogil and
- not isinstance(self.finally_clause, GILExitNode)):
- ret_temp = code.funcstate.allocate_temp(
- self.func_return_type, manage_ref=False)
- code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
- if self.func_return_type.is_pyobject:
- code.putln("%s = 0;" % Naming.retval_cname)
-
- fresh_finally_clause().generate_execution_code(code)
-
- if old_label == return_label:
- if ret_temp:
- code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
+ if old_label == return_label:
+ # return actually raises an (uncatchable) exception in generators that we must preserve
+ if self.in_generator:
+ exc_vars = tuple([
+ code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+ for _ in range(6)])
+ self.put_error_catcher(code, [], exc_vars)
+ if not self.finally_clause.is_terminator:
+ # store away return value for later reuse
+ if (self.func_return_type and
+ not self.is_try_finally_in_nogil and
+ not isinstance(self.finally_clause, GILExitNode)):
+ ret_temp = code.funcstate.allocate_temp(
+ self.func_return_type, manage_ref=False)
+ code.putln("%s = %s;" % (ret_temp, Naming.retval_cname))
+ if self.func_return_type.is_pyobject:
+ code.putln("%s = 0;" % Naming.retval_cname)
+
+ fresh_finally_clause().generate_execution_code(code)
+
+ if old_label == return_label:
+ if ret_temp:
+ code.putln("%s = %s;" % (Naming.retval_cname, ret_temp))
if self.func_return_type.is_pyobject:
- code.putln("%s = 0;" % ret_temp)
- code.funcstate.release_temp(ret_temp)
- if self.in_generator:
- self.put_error_uncatcher(code, exc_vars)
+ code.putln("%s = 0;" % ret_temp)
+ code.funcstate.release_temp(ret_temp)
+ if self.in_generator:
+ self.put_error_uncatcher(code, exc_vars)
for cname in exc_vars:
code.funcstate.release_temp(cname)
-
+
if not self.finally_clause.is_terminator:
code.put_goto(old_label)
code.putln('}')
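
The return path through a try/finally is the most intricate case restored here: inside a generator, `return` is implemented as an (uncatchable) exception, so the code allocates six temporaries, catches and stores that exception state before the finally clause, parks the pending return value in a temp when the finally clause can fall through, and restores everything afterwards. Plain Python shows the semantics being preserved:

    def gen():
        try:
            yield 1
            return "result"          # must survive the finally block
        finally:
            pass                     # finally code runs before StopIteration("result") propagates

    it = gen()
    next(it)
    try:
        next(it)
    except StopIteration as exc:
        assert exc.value == "result"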
@@ -7705,7 +7705,7 @@ class TryFinallyStatNode(StatNode):
self.finally_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, temps_to_clean_up, exc_vars,
- exc_lineno_cnames=None, exc_filename_cname=None):
+ exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(get_exception_utility_code)
code.globalstate.use_utility_code(swap_exception_utility_code)
@@ -7714,7 +7714,7 @@ class TryFinallyStatNode(StatNode):
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_PyThreadState_assign")
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
@@ -7738,7 +7738,7 @@ class TryFinallyStatNode(StatNode):
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
- def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
+ def put_error_uncatcher(self, code, exc_vars, exc_lineno_cnames=None, exc_filename_cname=None):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.globalstate.use_utility_code(reset_exception_utility_code)
@@ -7759,7 +7759,7 @@ class TryFinallyStatNode(StatNode):
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
- code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
if exc_lineno_cnames:
code.putln("%s = %s; %s = %s; %s = %s;" % (
Naming.lineno_cname, exc_lineno_cnames[0],
@@ -7818,7 +7818,7 @@ class GILStatNode(NogilTryFinallyStatNode):
from .ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(body)
- if not collector.yields:
+ if not collector.yields:
return
if state == 'gil':
@@ -8235,17 +8235,17 @@ class ParallelStatNode(StatNode, ParallelNode):
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
- seen = set()
+ seen = set()
for dictitem in self.kwargs.key_value_pairs:
- if dictitem.key.value in seen:
- error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
- seen.add(dictitem.key.value)
+ if dictitem.key.value in seen:
+ error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
+ seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
- if not dictitem.value.is_none:
- self.num_threads = dictitem.value
+ if not dictitem.value.is_none:
+ self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
- if not dictitem.value.is_none:
- self.chunksize = dictitem.value
+ if not dictitem.value.is_none:
+ self.chunksize = dictitem.value
else:
pairs.append(dictitem)
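
The parallel-section hunk validates the directive's keyword arguments: duplicates are reported, `num_threads` and (for `prange`) `chunksize` are extracted unless given as `None`, the rest is passed along, and a later hunk coerces a non-simple or Python-typed `num_threads` to a C int temp and rejects compile-time values that are not positive. Typical pure-Python-mode usage that exercises these arguments (illustrative; parallel execution requires compiling with OpenMP):

    import cython
    from cython.parallel import prange

    def sum_range(n: cython.Py_ssize_t) -> cython.double:
        total: cython.double = 0
        for i in prange(n, nogil=True, num_threads=4, schedule='static', chunksize=64):
            total += i
        return total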
@@ -8285,7 +8285,7 @@ class ParallelStatNode(StatNode, ParallelNode):
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
- if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
+ if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
@@ -8687,7 +8687,7 @@ class ParallelStatNode(StatNode, ParallelNode):
invalid_value = entry.type.invalid_value()
if invalid_value:
- init = ' = ' + entry.type.cast_code(invalid_value)
+ init = ' = ' + entry.type.cast_code(invalid_value)
else:
init = ''
# Declare the parallel private in the outer block